author     Jeremy Kerr <jk@ozlabs.org>                          2009-02-17 11:44:14 +1100
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2009-02-23 10:48:58 +1100
commit     60ee031940c1b09c137b617a8829e2f081961fe0 (patch)
tree       eb61ad956dc3b6e2e4e5f7434451fddc97cda160 /arch/powerpc
parent     13870b657578bcce167978ee93dc02bf54e3beb0 (diff)
powerpc/spufs: Use correct return value for spu_handle_mm_fault
Currently, spu_handle_mm_fault disregards the 'ret' variable and always returns -EFAULT on error. This change refactors spu_handle_mm_fault a little, to return the ret variable as appropriate. This allows us to combine the error and success paths.

Also, remove the #if-0-ed IS_VALID_EA() check; it has never been used.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
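For illustration only, here is a minimal userspace sketch of the control-flow pattern the patch adopts: initialise 'ret' to the failure value after taking the lock, jump to a single out_unlock label on any error, and let the success and error paths share the unlock-and-return tail. The helper names and the pthread lock are hypothetical stand-ins, not spufs code.

/* Standalone sketch (not kernel code) of the single-exit pattern. */
#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-ins for the vma lookup and fault-handling steps. */
static int lookup_ok(unsigned long ea) { return ea != 0; }
static int fault_ok(unsigned long ea)  { return ea < 0x1000; }

static int demo_handle_fault(unsigned long ea)
{
	int ret;

	pthread_rwlock_rdlock(&demo_lock);
	ret = -1;			/* assume failure, like ret = -EFAULT */

	if (!lookup_ok(ea))
		goto out_unlock;	/* every error path jumps here */

	if (!fault_ok(ea))
		goto out_unlock;

	ret = 0;			/* success shares the same exit */

out_unlock:
	pthread_rwlock_unlock(&demo_lock);
	return ret;
}

int main(void)
{
	printf("ea=0x10   -> %d\n", demo_handle_fault(0x10));	/* prints 0 */
	printf("ea=0x2000 -> %d\n", demo_handle_fault(0x2000));	/* prints -1 */
	return 0;
}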
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spu_fault.c  |  48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index c8b1cd42905d..95d8dadf2d87 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -39,60 +39,56 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	unsigned long is_write;
 	int ret;
-#if 0
-	if (!IS_VALID_EA(ea)) {
+	if (mm == NULL)
 		return -EFAULT;
-	}
-#endif /* XXX */
-	if (mm == NULL) {
-		return -EFAULT;
-	}
-	if (mm->pgd == NULL) {
+
+	if (mm->pgd == NULL)
 		return -EFAULT;
-	}
 	down_read(&mm->mmap_sem);
+	ret = -EFAULT;
 	vma = find_vma(mm, ea);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= ea)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, ea))
-		goto bad_area;
-good_area:
+		goto out_unlock;
+
+	if (ea < vma->vm_start) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_unlock;
+		if (expand_stack(vma, ea))
+			goto out_unlock;
+	}
+
 	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
+			goto out_unlock;
 	} else {
 		if (dsisr & MFC_DSISR_ACCESS_DENIED)
-			goto bad_area;
+			goto out_unlock;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
+			goto out_unlock;
 	}
+
 	ret = 0;
 	*flt = handle_mm_fault(mm, vma, ea, is_write);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
-			goto bad_area;
+			goto out_unlock;
 		} else if (*flt & VM_FAULT_SIGBUS) {
 			ret = -EFAULT;
-			goto bad_area;
+			goto out_unlock;
 		}
 		BUG();
 	}
+
 	if (*flt & VM_FAULT_MAJOR)
 		current->maj_flt++;
 	else
 		current->min_flt++;
-	up_read(&mm->mmap_sem);
-	return ret;
-bad_area:
+out_unlock:
 	up_read(&mm->mmap_sem);
-	return -EFAULT;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(spu_handle_mm_fault);