author    Jeremy Kerr <jk@ozlabs.org>  2007-12-20 16:39:59 +0900
committer Paul Mackerras <paulus@samba.org>  2007-12-21 19:46:19 +1100
commit    7cd58e43810852eeb7af5a0c803f3890bd08b581 (patch)
tree      d9ea5c0102d70c26c4a9b18aaf4db4e3b6d48fc1 /arch/powerpc/platforms/cell/spu_fault.c
parent    9b1d21f858e8bad750ab19cac23dcbf79d099be3 (diff)
[POWERPC] spufs: move fault, lscsa_alloc and switch code to spufs module
Currently, part of the spufs code (switch.o, lscsa_alloc.o and fault.o) is compiled directly into the kernel.

This change moves these components into the spufs module.

The lscsa and switch objects are fairly straightforward to move in. For fault.o, we split the fault-handling code into two parts: arch/powerpc/platforms/cell/spu_fault.c and arch/powerpc/platforms/cell/spufs/fault.c. The former keeps the in-kernel spu_handle_mm_fault function, and the rest of the fault-handling code moves into spufs.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
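As a rough illustration (not part of this patch), a caller on the spufs side of the split might invoke the exported spu_handle_mm_fault() along the following lines. The function name example_spufs_resolve_fault and its surrounding control flow are hypothetical; only the spu_handle_mm_fault() signature comes from the file added below, and the <asm/spu.h> include assumes that is where its prototype is declared.

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spu.h>

/* Hypothetical caller sketch: resolve an SPE DMA fault against the owning mm. */
static int example_spufs_resolve_fault(unsigned long ea, unsigned long dsisr)
{
	unsigned flt = 0;
	int ret;

	/*
	 * Ask the in-kernel helper to fault in the page for this effective
	 * address; flt receives the handle_mm_fault() result bits.
	 */
	ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* -EFAULT or -ENOMEM from the helper */

	/* On success, a real caller would restart the stopped DMA here. */
	return 0;
}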
Diffstat (limited to 'arch/powerpc/platforms/cell/spu_fault.c')
-rw-r--r--  arch/powerpc/platforms/cell/spu_fault.c  |  98
1 file changed, 98 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
new file mode 100644
index 00000000000..c8b1cd42905
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -0,0 +1,98 @@
+/*
+ * SPU mm fault handler
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ * Author: Jeremy Kerr <jk@ozlabs.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+/*
+ * This ought to be kept in sync with the powerpc-specific do_page_fault
+ * function. Fortunately, there are a few corner cases that we have not
+ * yet had to handle here.
+ */
+int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, unsigned *flt)
+{
+ struct vm_area_struct *vma;
+ unsigned long is_write;
+ int ret;
+
+#if 0
+ if (!IS_VALID_EA(ea)) {
+ return -EFAULT;
+ }
+#endif /* XXX */
+ if (mm == NULL) {
+ return -EFAULT;
+ }
+ if (mm->pgd == NULL) {
+ return -EFAULT;
+ }
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, ea);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= ea)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, ea))
+ goto bad_area;
+good_area:
+ is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (dsisr & MFC_DSISR_ACCESS_DENIED)
+ goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+ ret = 0;
+ *flt = handle_mm_fault(mm, vma, ea, is_write);
+ if (unlikely(*flt & VM_FAULT_ERROR)) {
+ if (*flt & VM_FAULT_OOM) {
+ ret = -ENOMEM;
+ goto bad_area;
+ } else if (*flt & VM_FAULT_SIGBUS) {
+ ret = -EFAULT;
+ goto bad_area;
+ }
+ BUG();
+ }
+ if (*flt & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ up_read(&mm->mmap_sem);
+ return ret;
+
+bad_area:
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(spu_handle_mm_fault);