Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/compat.h         |  5
-rw-r--r--  arch/powerpc/include/asm/pgtable-4k.h     |  2
-rw-r--r--  arch/powerpc/include/asm/pgtable-64k.h    |  2
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h  |  3
-rw-r--r--  arch/powerpc/include/asm/seccomp.h        |  4
-rw-r--r--  arch/powerpc/kernel/align.c               | 36
-rw-r--r--  arch/powerpc/kvm/powerpc.c                |  4
-rw-r--r--  arch/powerpc/lib/copyuser_64.S            | 38
-rw-r--r--  arch/powerpc/lib/memcpy_64.S              | 26
-rw-r--r--  arch/powerpc/mm/numa.c                    |  5
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c           |  2
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c          | 17
12 files changed, 104 insertions, 40 deletions
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index d811a8cd7b5..4774c2f9223 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -210,5 +210,10 @@ struct compat_shmid64_ds {
 	compat_ulong_t __unused6;
 };
 
+static inline int is_compat_task(void)
+{
+	return test_thread_flag(TIF_32BIT);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-4k.h
index 6b18ba9d2d8..1dbca4e7de6 100644
--- a/arch/powerpc/include/asm/pgtable-4k.h
+++ b/arch/powerpc/include/asm/pgtable-4k.h
@@ -60,7 +60,7 @@
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_HPTEFLAGS)
+			 _PAGE_HPTEFLAGS | _PAGE_SPECIAL)
 
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pgtable-64k.h
index 07b0d8f09cb..7389003349a 100644
--- a/arch/powerpc/include/asm/pgtable-64k.h
+++ b/arch/powerpc/include/asm/pgtable-64k.h
@@ -114,7 +114,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
  * pgprot changes
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED)
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0x1ff
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index f69a4d97772..820b5f0a35c 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -429,7 +429,8 @@ extern int icache_44x_need_flush;
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+			 _PAGE_SPECIAL)
 
 
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
index 853765eb1f6..00c1d9133cf 100644
--- a/arch/powerpc/include/asm/seccomp.h
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -1,10 +1,6 @@
 #ifndef _ASM_POWERPC_SECCOMP_H
 #define _ASM_POWERPC_SECCOMP_H
 
-#ifdef __KERNEL__
-#include <linux/thread_info.h>
-#endif
-
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5af4e9b2dbe..73cb6a3229a 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 			   unsigned int flags)
 {
-	char *ptr = (char *) &current->thread.TS_FPR(reg);
-	int i, ret;
+	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+	int i, ret, sw = 0;
 
 	if (!(flags & F))
 		return 0;
 	if (reg & 1)
 		return 0;	/* invalid form: FRS/FRT must be even */
-	if (!(flags & SW)) {
-		/* not byte-swapped - easy */
-		if (!(flags & ST))
-			ret = __copy_from_user(ptr, addr, 16);
-		else
-			ret = __copy_to_user(addr, ptr, 16);
-	} else {
-		/* each FPR value is byte-swapped separately */
-		ret = 0;
-		for (i = 0; i < 16; ++i) {
-			if (!(flags & ST))
-				ret |= __get_user(ptr[i^7], addr + i);
-			else
-				ret |= __put_user(ptr[i^7], addr + i);
+	if (flags & SW)
+		sw = 7;
+	ret = 0;
+	for (i = 0; i < 8; ++i) {
+		if (!(flags & ST)) {
+			ret |= __get_user(ptr0[i^sw], addr + i);
+			ret |= __get_user(ptr1[i^sw], addr + i + 8);
+		} else {
+			ret |= __put_user(ptr0[i^sw], addr + i);
+			ret |= __put_user(ptr1[i^sw], addr + i + 8);
 		}
 	}
 	if (ret)
@@ -646,11 +643,16 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 		       unsigned int areg, struct pt_regs *regs,
 		       unsigned int flags, unsigned int length)
 {
-	char *ptr = (char *) &current->thread.TS_FPR(reg);
+	char *ptr;
 	int ret = 0;
 
 	flush_vsx_to_thread(current);
 
+	if (reg < 32)
+		ptr = (char *) &current->thread.TS_FPR(reg);
+	else
+		ptr = (char *) &current->thread.vr[reg - 32];
+
 	if (flags & ST)
 		ret = __copy_to_user(addr, ptr, length);
 	else {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2822c8ccfaa..5f81256287f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -125,6 +125,10 @@ static void kvmppc_free_vcpus(struct kvm *kvm)
 	}
 }
 
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvmppc_free_vcpus(kvm);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 70693a5c12a..693b14a778f 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -62,18 +62,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 72:	std	r8,8(r3)
 	beq+	3f
 	addi	r3,r3,16
-23:	ld	r9,8(r4)
 .Ldo_tail:
 	bf	cr7*4+1,1f
-	rotldi	r9,r9,32
+23:	lwz	r9,8(r4)
+	addi	r4,r4,4
 73:	stw	r9,0(r3)
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
-	rotldi	r9,r9,16
+44:	lhz	r9,8(r4)
+	addi	r4,r4,2
 74:	sth	r9,0(r3)
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
-	rotldi	r9,r9,8
+45:	lbz	r9,8(r4)
 75:	stb	r9,0(r3)
 3:	li	r3,0
 	blr
@@ -141,11 +142,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 6:	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	sld	r9,r9,r10
-	ble	cr1,.Ldo_tail
+	ble	cr1,7f
 34:	ld	r0,8(r4)
 	srd	r7,r0,r11
 	or	r9,r7,r9
-	b	.Ldo_tail
+7:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+94:	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+95:	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+96:	stb	r9,0(r3)
+3:	li	r3,0
+	blr
 
 .Ldst_unaligned:
 	PPC_MTOCRF	0x01,r6		/* put #bytes to 8B bdry into cr7 */
@@ -218,7 +232,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 121:
 132:
 	addi	r3,r3,8
-123:
 134:
 135:
 138:
@@ -226,6 +239,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 140:
 141:
 142:
+123:
+144:
+145:
 
 /*
  * here we have had a fault on a load and r3 points to the first
@@ -309,6 +325,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 187:
 188:
 189:
+194:
+195:
+196:
 1:
 	ld	r6,-24(r1)
 	ld	r5,-8(r1)
@@ -329,7 +348,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	.llong	72b,172b
 	.llong	23b,123b
 	.llong	73b,173b
+	.llong	44b,144b
 	.llong	74b,174b
+	.llong	45b,145b
 	.llong	75b,175b
 	.llong	24b,124b
 	.llong	25b,125b
@@ -347,6 +368,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	.llong	79b,179b
 	.llong	80b,180b
 	.llong	34b,134b
+	.llong	94b,194b
+	.llong	95b,195b
+	.llong	96b,196b
 	.llong	35b,135b
 	.llong	81b,181b
 	.llong	36b,136b
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index fe2d34e5332..e178922b2c2 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -53,18 +53,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 3:	std	r8,8(r3)
 	beq	3f
 	addi	r3,r3,16
-	ld	r9,8(r4)
 .Ldo_tail:
 	bf	cr7*4+1,1f
-	rotldi	r9,r9,32
+	lwz	r9,8(r4)
+	addi	r4,r4,4
 	stw	r9,0(r3)
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
-	rotldi	r9,r9,16
+	lhz	r9,8(r4)
+	addi	r4,r4,2
 	sth	r9,0(r3)
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
-	rotldi	r9,r9,8
+	lbz	r9,8(r4)
 	stb	r9,0(r3)
 3:	ld	r3,48(r1)	/* return dest pointer */
 	blr
@@ -133,11 +134,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	sld	r9,r9,r10
-	ble	cr1,.Ldo_tail
+	ble	cr1,6f
 	ld	r0,8(r4)
 	srd	r7,r0,r11
 	or	r9,r7,r9
-	b	.Ldo_tail
+6:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+	stb	r9,0(r3)
+3:	ld	r3,48(r1)	/* return dest pointer */
+	blr
 
 .Ldst_unaligned:
 	PPC_MTOCRF	0x01,r6		# put #bytes to 8B bdry into cr7
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 7393bd76d69..5ac08b8ab65 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -19,6 +19,7 @@
 #include <linux/notifier.h>
 #include <linux/lmb.h>
 #include <linux/of.h>
+#include <linux/pfn.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/system.h>
@@ -882,7 +883,7 @@ static void mark_reserved_regions_for_nid(int nid)
 		unsigned long physbase = lmb.reserved.region[i].base;
 		unsigned long size = lmb.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
-		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
 		unsigned long node_end_pfn = node->node_start_pfn +
 					     node->node_spanned_pages;
@@ -908,7 +909,7 @@ static void mark_reserved_regions_for_nid(int nid)
 		 */
 		if (end_pfn > node_ar.end_pfn)
 			reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-					- (start_pfn << PAGE_SHIFT);
+					- physbase;
 		/*
 		 * Only worry about *this* node, others may not
 		 * yet have valid NODE_DATA().
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 67de6bf3db3..d281cc0bca7 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -328,7 +328,7 @@ static int __init ps3_mm_add_memory(void)
 	return result;
 }
 
-core_initcall(ps3_mm_add_memory);
+device_initcall(ps3_mm_add_memory);
 
 /*============================================================================*/
 /* dma routines */
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 77fae5f64f2..5558d932b4d 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -204,6 +204,23 @@ static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
 {
 	u32 ma, pcila, pciha;
 
+	/* Hack warning ! The "old" PCI 2.x cell only let us configure the low
+	 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
+	 * address are actually hard wired to a value that appears to depend
+	 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
+	 *
+	 * The trick here is we just crop those top bits and ignore them when
+	 * programming the chip. That means the device-tree has to be right
+	 * for the specific part used (we don't print a warning if it's wrong
+	 * but on the other hand, you'll crash quickly enough), but at least
+	 * this code should work whatever the hard coded value is
+	 */
+	plb_addr &= 0xffffffffull;
+
+	/* Note: Due to the above hack, the test below doesn't actually test
+	 * if your address is above 4G, but it tests that address and
+	 * (address + size) are both contained in the same 4G
+	 */
 	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
 	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
 		printk(KERN_WARNING "%s: Resource out of range\n",