staging: tidspbridge: protect dmm_map properly

We need to protect not only the dmm_map list but also the individual
map_obj's; otherwise, we might be building the scatter-gather list with
garbage. So, use the existing proc_lock for that.
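
For illustration only, here is a compact user-space sketch of the rule this
change enforces; the struct, list, and helpers below are made-up stand-ins
modeled on the driver's map_obj, dmm_map list, and proc_lock, not tidspbridge
code. The point is that the lookup and every use of the object it returns
happen under the same mutex that serializes removal, so a concurrent unmap
cannot free the map_obj while a scatter-gather list is being built from it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the driver's map_obj, with only the fields used here. */
struct map_obj {
	struct map_obj *next;
	unsigned long addr;
	unsigned long size;
};

static struct map_obj *map_list;	/* stand-in for the dmm_map list */
static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;

/* The result is only safe to use while proc_lock is still held. */
static struct map_obj *find_containing_mapping(unsigned long addr,
						unsigned long size)
{
	struct map_obj *m;

	for (m = map_list; m; m = m->next)
		if (addr >= m->addr && addr + size <= m->addr + m->size)
			return m;
	return NULL;
}

/* Rough analogue of proc_begin_dma(): lock around the lookup *and* the use. */
static int begin_dma(unsigned long addr, unsigned long size)
{
	struct map_obj *m;
	int status = 0;

	pthread_mutex_lock(&proc_lock);
	m = find_containing_mapping(addr, size);
	if (!m) {
		status = -1;
		goto no_map;
	}
	/* ... the scatter-gather list would be built from 'm' here ... */
	printf("using mapping %#lx-%#lx\n", m->addr, m->addr + m->size);
no_map:
	pthread_mutex_unlock(&proc_lock);
	return status;
}

/* Rough analogue of proc_un_map(): removal is serialized by the same lock. */
static void un_map(unsigned long addr)
{
	struct map_obj **p, *m;

	pthread_mutex_lock(&proc_lock);
	for (p = &map_list; (m = *p) != NULL; p = &m->next)
		if (m->addr == addr) {
			*p = m->next;
			free(m);
			break;
		}
	pthread_mutex_unlock(&proc_lock);
}

int main(void)
{
	struct map_obj *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->addr = 0x1000;
	m->size = 0x2000;
	map_list = m;

	begin_dma(0x1800, 0x100);	/* used under proc_lock           */
	un_map(0x1000);			/* cannot race with the use above */
	begin_dma(0x1800, 0x100);	/* fails cleanly: mapping is gone */
	return 0;
}

Previously only the list itself was protected; the map_obj returned by the
lookup could disappear under proc_begin_dma()/proc_end_dma(), which is where
the garbage scatter-gather entries came from.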

I observed race conditions that caused kernel panics while running
stress tests, and Tuomas Kulve also found it happening quite often on a
Gumstix Overo. This patch fixes those.

Cc: Tuomas Kulve <tuomas@kulve.fi>
Signed-off-by: Felipe Contreras <felipe.contreras@nokia.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Cc: stable <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Felipe Contreras, 2011-03-11 18:29:06 -06:00, committed by Greg Kroah-Hartman
parent 17e2a54203
commit ab42abf33a
1 changed file with 14 additions and 5 deletions

@@ -779,12 +779,14 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
 						(u32)pmpu_addr,
 						ul_size, dir);
 
+	mutex_lock(&proc_lock);
+
 	/* find requested memory are in cached mapping information */
 	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
 	if (!map_obj) {
 		pr_err("%s: find_containing_mapping failed\n", __func__);
 		status = -EFAULT;
-		goto err_out;
+		goto no_map;
 	}
 
 	if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
@@ -793,6 +795,8 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
 		status = -EFAULT;
 	}
 
+no_map:
+	mutex_unlock(&proc_lock);
 err_out:
 	return status;
 }
@@ -817,21 +821,24 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
 						(u32)pmpu_addr,
 						ul_size, dir);
 
+	mutex_lock(&proc_lock);
+
 	/* find requested memory are in cached mapping information */
 	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
 	if (!map_obj) {
 		pr_err("%s: find_containing_mapping failed\n", __func__);
 		status = -EFAULT;
-		goto err_out;
+		goto no_map;
 	}
 
 	if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
 		pr_err("%s: InValid address parameters %p %x\n",
 		       __func__, pmpu_addr, ul_size);
 		status = -EFAULT;
-		goto err_out;
 	}
 
+no_map:
+	mutex_unlock(&proc_lock);
 err_out:
 	return status;
 }
@@ -1724,9 +1731,8 @@ int proc_un_map(void *hprocessor, void *map_addr,
 			(p_proc_object->bridge_context, va_align, size_align);
 	}
 
-	mutex_unlock(&proc_lock);
 	if (status)
-		goto func_end;
+		goto unmap_failed;
 
 	/*
 	 * A successful unmap should be followed by removal of map_obj
@@ -1735,6 +1741,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
 	 */
 	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
 
+unmap_failed:
+	mutex_unlock(&proc_lock);
+
 func_end:
 	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
 		__func__, hprocessor, map_addr, status);
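
A side note on the error paths in the hunks above: the new no_map and
unmap_failed labels sit just in front of mutex_unlock(), while err_out and
func_end stay after it, so the unlock only runs on paths that actually took
proc_lock; any earlier goto err_out/func_end taken before the lock keeps
skipping it. A generic sketch of that two-label unwind (the helper names here
are hypothetical, not driver code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helpers standing in for the pre-lock and locked work. */
static int validate_args(int x)  { return x > 0 ? 0 : -1; }
static int do_locked_work(int x) { return x == 42 ? -1 : 0; }

static int begin(int x)
{
	int status;

	status = validate_args(x);
	if (status)
		goto err_out;		/* lock not taken: skip the unlock */

	pthread_mutex_lock(&lock);

	status = do_locked_work(x);
	if (status)
		goto no_map;		/* lock held: unwind through unlock */

	/* ... more work that needs the lock would go here ... */

no_map:
	pthread_mutex_unlock(&lock);
err_out:
	return status;
}

int main(void)
{
	begin(1);	/* takes and releases the lock */
	begin(-1);	/* fails early, never locks    */
	return 0;
}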