From 7d24f0b8a53261709938ffabe3e00f88f6498df9 Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Mon, 7 Nov 2005 00:57:52 -0800
Subject: [PATCH] ppc64: Fix bug in SLB miss handler for hugepages

This patch should be applied on top of the 64k-page-size patch; it fixes some problems with hugepages (some pre-existing, another introduced by that patch).

The patch fixes a bug in the SLB miss handler for hugepages on ppc64 introduced by the dynamic hugepage patch (commit id c594adad5653491813959277fb87a2fef54c4e05) due to a misunderstanding of the srd instruction's behaviour (mea culpa). The problem arises when a 64-bit process maps some hugepages in the low 4GB of the address space (unusual). In this case, as well as the 256M segment in question being marked for hugepages, other segments at 32G intervals will be incorrectly marked for hugepages.

In the process, this patch tweaks the semantics of the hugepage bitmaps to be more sensible. Previously, an address below 4G was marked for hugepages if the appropriate segment bit in the "low areas" bitmask was set *or* if the low bit in the "high areas" bitmap was set (which would mark all addresses below 1TB for hugepage). With this patch, any given address is governed by a single bitmap. Addresses below 4GB are marked for hugepage if and only if their bit is set in the "low areas" bitmap (256M granularity). Addresses between 4GB and 1TB are marked for hugepage iff the low bit in the "high areas" bitmap is set. Higher addresses are marked for hugepage iff their bit in the "high areas" bitmap is set (1TB granularity).

To avoid conflicts, this patch must be applied on top of BenH's pending patch for 64k base page size [0]. As such, this patch also addresses a hugepage problem introduced by that patch. That patch allows hugepages of 1MB in size on hardware which supports it; however, that won't work when using 4k pages (4-level pagetable), because in that case hugepage PTEs are stored at the PMD level, and each PMD entry maps 2MB. This patch simply disallows hugepages in that case (we can do something cleverer to re-enable them some other day).

Built, booted, and a handful of hugepage-related tests passed on POWER5 LPAR (both ARCH=powerpc and ARCH=ppc64).

[0] http://gate.crashing.org/~benh/ppc64-64k-pages.diff

Signed-off-by: David Gibson
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/mm/hash_utils_64.c |  6 ++++--
 arch/powerpc/mm/hugetlbpage.c   |  6 ++++++
 arch/powerpc/mm/slb_low.S       | 13 +++++++++----
 3 files changed, 19 insertions(+), 6 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b2f3dbca695..f15dfb92dec 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -329,12 +329,14 @@ static void __init htab_init_page_sizes(void)
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift)
 		mmu_huge_psize = MMU_PAGE_16M;
+	/* With 4k/4level pagetables, we can't (for now) cope with a
+	 * huge page size < PMD_SIZE */
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_huge_psize = MMU_PAGE_1M;
 
 	/* Calculate HPAGE_SHIFT and sanity check it */
-	if (mmu_psize_defs[mmu_huge_psize].shift > 16 &&
-	    mmu_psize_defs[mmu_huge_psize].shift < 28)
+	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
+	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
 		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
 	else
 		HPAGE_SHIFT = 0; /* No huge pages dude ! */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0073a04047e..426c269e552 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -212,6 +212,12 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 
 	BUG_ON(area >= NUM_HIGH_AREAS);
 
+	/* Hack, so that each addresses is controlled by exactly one
+	 * of the high or low area bitmaps, the first high area starts
+	 * at 4GB, not 0 */
+	if (start == 0)
+		start = 0x100000000UL;
+
 	/* Check no VMAs are in the region */
 	vma = find_vma(mm, start);
 	if (vma && (vma->vm_start < end))
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 3e18241b6f3..950ffc5848c 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -80,12 +80,17 @@ _GLOBAL(slb_miss_kernel_load_virtual)
 BEGIN_FTR_SECTION
 	b	1f
 END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+	cmpldi	r10,16
+
+	lhz	r9,PACALOWHTLBAREAS(r13)
+	mr	r11,r10
+	blt	5f
+
 	lhz	r9,PACAHIGHHTLBAREAS(r13)
 	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
-	srd	r9,r9,r11
-	lhz	r11,PACALOWHTLBAREAS(r13)
-	srd	r11,r11,r10
-	or.	r9,r9,r11
+
+5:	srd	r9,r9,r11
+	andi.	r9,r9,1
 	beq	1f
_GLOBAL(slb_miss_user_load_huge)
	li	r11,0
--
cgit v1.2.3
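The commit message above specifies the new rule fully in prose; as a quick restatement, the small C sketch below captures the same check. The helper name and its standalone form are made up for illustration (the real test is split between the SLB miss assembly and the hugepage hash code), and it assumes effective addresses below 16TB, which is all the 16-bit "high areas" bitmap can describe.

#include <stdbool.h>
#include <stdint.h>

#define SID_SHIFT	28	/* 256M segments, used below 4GB */
#define HTLB_AREA_SHIFT	40	/* 1TB areas, used at and above 4GB */

/* Each address is governed by exactly one bitmap: the low bitmap per
 * 256M segment below 4GB, the high bitmap per 1TB area otherwise, so
 * everything from 4GB up to 1TB lands on bit 0 of the high bitmap. */
static bool is_hugepage_addr(uint64_t ea, uint16_t low_areas, uint16_t high_areas)
{
	if (ea < (1ULL << 32))
		return (low_areas >> (ea >> SID_SHIFT)) & 1;
	return (high_areas >> (ea >> HTLB_AREA_SHIFT)) & 1;
}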
From 732ee21f2894819781766a0cd88e32bdd630d11e Mon Sep 17 00:00:00 2001
From: Olof Johansson
Date: Mon, 7 Nov 2005 00:57:55 -0800
Subject: [PATCH] POWERPC/PPC64: Fix CONFIG_SMP=n build for ppc64

Two CONFIG_SMP=n build fixes due to missing includes.

Signed-off-by: Olof Johansson
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/time.c | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6996a593dcb..b1c89bc4bf9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -69,6 +69,7 @@
 #include
 #include
 #endif
+#include
 
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
--
cgit v1.2.3

From 863c84b97cb660dbb949398e196c0b1bbe4ed39f Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 7 Nov 2005 00:57:58 -0800
Subject: [PATCH] ppc: Fix ppc32 build after 64K pages

Oops, some last-minute changes caused the 64K pages patch to break the ppc32 build; this fixes it.

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/Kconfig         | 1 +
 arch/powerpc/mm/ppc_mmu_32.c | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ca7acb0c79f..55ce4957052 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -605,6 +605,7 @@ config NODES_SPAN_OTHER_NODES
 
 config PPC_64K_PAGES
 	bool "64k page size"
+	depends on PPC64
 	help
 	  This option changes the kernel logical page size to 64k. On machines
 	  without processor support for 64k pages, the kernel will simulate
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index d137abd241f..ed7fcfe5fd3 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -188,9 +188,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 
 	if (Hash == 0)
 		return;
-	pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
+	pmd = pmd_offset(pgd_offset(mm, ea), ea);
 	if (!pmd_none(*pmd))
-		add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
+		add_hash_page(mm->context, ea, pmd_val(*pmd));
 }
 
 /*
--
cgit v1.2.3
From b0f7b8bc57ee90138a7c429951457027a90c326f Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Mon, 7 Nov 2005 00:58:13 -0800
Subject: [PATCH] ppc32: Add 440SPe support

Add support for the AMCC PowerPC 440SPe SoC, including PCI Express in root port mode.

Signed-off-by: Roland Dreier
Cc: Matt Porter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/cputable.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 33c63bcf69f..cc4e9eb1c13 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -929,6 +929,16 @@ struct cpu_spec cpu_specs[] = {
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
 	},
+	{ /* 440SPe Rev. A */
+		.pvr_mask		= 0xff000fff,
+		.pvr_value		= 0x53000890,
+		.cpu_name		= "440SPe Rev. A",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB,
+		.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 32,
+	},
 #endif /* CONFIG_44x */
 #ifdef CONFIG_FSL_BOOKE
 	{ /* e200z5 */
--
cgit v1.2.3

From cd6b0762a04978baf48412456a687842de97e381 Mon Sep 17 00:00:00 2001
From: Prasanna S Panchamukhi
Date: Mon, 7 Nov 2005 00:59:14 -0800
Subject: [PATCH] Move Kprobes and Oprofile to "Instrumentation Support" menu

Andrew Morton suggested moving kprobes out of the kernel hacking menu, since that menu is inappropriate for Kprobes. This patch moves Kprobes and Oprofile under an "Instrumentation Support" menu.

(akpm: it's not a natural fit, but things like djprobes and the s390 guys' statistics library need a home)

Signed-off-by: Prasanna S Panchamukhi
Cc: Philippe Elie
Cc: John Levon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/Kconfig          | 13 +++++++++++++
 arch/powerpc/Kconfig.debug    | 10 ----------
 arch/powerpc/oprofile/Kconfig |  6 ------
 3 files changed, 13 insertions(+), 16 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 55ce4957052..6ffae2d2b3f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -917,8 +917,21 @@ source "arch/powerpc/platforms/iseries/Kconfig"
 
 source "lib/Kconfig"
 
+menu "Instrumentation Support"
+	depends on EXPERIMENTAL
+
 source "arch/powerpc/oprofile/Kconfig"
 
+config KPROBES
+	bool "Kprobes (EXPERIMENTAL)"
+	help
+	  Kprobes allows you to trap at almost any kernel address and
+	  execute a callback function. register_kprobe() establishes
+	  a probepoint and specifies the callback. Kprobes is useful
+	  for kernel debugging, non-intrusive instrumentation and testing.
+	  If in doubt, say "N".
+endmenu
+
 source "arch/powerpc/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 0baf64ec80d..30a30bf559e 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -9,16 +9,6 @@ config DEBUG_STACKOVERFLOW
 	  This option will cause messages to be printed if free stack space
 	  drops below a certain limit.
 
-config KPROBES
-	bool "Kprobes"
-	depends on DEBUG_KERNEL && PPC64
-	help
-	  Kprobes allows you to trap at almost any kernel address and
-	  execute a callback function. register_kprobe() establishes
-	  a probepoint and specifies the callback. Kprobes is useful
-	  for kernel debugging, non-intrusive instrumentation and testing.
-	  If in doubt, say "N".
-
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
 	depends on DEBUG_KERNEL && PPC64
diff --git a/arch/powerpc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index 19d37730b66..eb2dece76a5 100644
--- a/arch/powerpc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
@@ -1,7 +1,3 @@
-
-menu "Profiling support"
-	depends on EXPERIMENTAL
-
 config PROFILING
 	bool "Profiling support (EXPERIMENTAL)"
 	help
@@ -19,5 +15,3 @@ config OPROFILE
 
 	  If unsure, say N.
 
-endmenu
-
--
cgit v1.2.3
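The KPROBES help text above only gestures at the API, so here is a rough sketch of the register_kprobe() flow it describes. It is written against a later kprobes API than the 2.6.14-era tree these patches target; in particular the .symbol_name field and the probe target named here are assumptions for illustration only.

#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* example target, any probeable symbol works */
	.pre_handler	= handler_pre,
};

static int __init probe_init(void)
{
	/* register_kprobe() plants the probepoint and arms the callback */
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");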
From 481bed454247538e9f57d4ea37b153ccba24ba7b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 7 Nov 2005 00:59:47 -0800
Subject: [PATCH] consolidate sys_ptrace()

The sys_ptrace boilerplate code (everything outside the big switch statement for the arch-specific requests) is shared by most architectures. This patch moves it to kernel/ptrace.c and leaves the arch-specific code as arch_ptrace.

Some architectures have a ptrace implementation that is too different, so we have to exclude them; they keep their own implementations.

For sh64 I had to add a sh64_ptrace wrapper because it does some initialization on the first call.

For um I removed an ifdefed SUBARCH_PTRACE_SPECIAL block, but SUBARCH_PTRACE_SPECIAL isn't defined anywhere in the tree.

Signed-off-by: Christoph Hellwig
Acked-by: Paul Mackerras
Acked-by: Ralf Baechle
Acked-By: David Howells
Acked-by: Russell King
Acked-by: Paul Mundt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/kernel/ptrace.c | 43 ++-----------------------------------------
 1 file changed, 2 insertions(+), 41 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 568ea335d61..3d2abd95c7a 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -248,46 +248,10 @@ void ptrace_disable(struct task_struct *child)
 	clear_single_step(child);
 }
 
-long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-	struct task_struct *child;
 	int ret = -EPERM;
 
-	lock_kernel();
-	if (request == PTRACE_TRACEME) {
-		/* are we already being traced? */
-		if (current->ptrace & PT_PTRACED)
-			goto out;
-		ret = security_ptrace(current->parent, current);
-		if (ret)
-			goto out;
-		/* set the ptrace bit in the process flags. */
-		current->ptrace |= PT_PTRACED;
-		ret = 0;
-		goto out;
-	}
-	ret = -ESRCH;
-	read_lock(&tasklist_lock);
-	child = find_task_by_pid(pid);
-	if (child)
-		get_task_struct(child);
-	read_unlock(&tasklist_lock);
-	if (!child)
-		goto out;
-
-	ret = -EPERM;
-	if (pid == 1)		/* you may not mess with init */
-		goto out_tsk;
-
-	if (request == PTRACE_ATTACH) {
-		ret = ptrace_attach(child);
-		goto out_tsk;
-	}
-
-	ret = ptrace_check_attach(child, request == PTRACE_KILL);
-	if (ret < 0)
-		goto out_tsk;
-
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -540,10 +504,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
 		ret = ptrace_request(child, request, addr, data);
 		break;
 	}
-out_tsk:
-	put_task_struct(child);
-out:
-	unlock_kernel();
+
 	return ret;
 }
 
--
cgit v1.2.3
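For context on where the removed boilerplate goes, the shared entry point this series adds in kernel/ptrace.c is shaped roughly like the sketch below. This is paraphrased from memory rather than quoted from the patch, so details such as the child-lookup helper may differ from the actual code of that era.

/* Rough shape of the generic sys_ptrace() after consolidation: the
 * lookup/attach/permission boilerplate lives here once, and only the
 * request switch remains per-architecture in arch_ptrace(). */
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);	/* find_task_by_pid() + get_task_struct() */
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
	} else {
		ret = ptrace_check_attach(child, request == PTRACE_KILL);
		if (!ret)
			ret = arch_ptrace(child, request, addr, data);
	}

	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}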
From b2325fe1b7e5654fac9e9419423aa2c58a3dbd83 Mon Sep 17 00:00:00 2001
From: Jesper Juhl
Date: Mon, 7 Nov 2005 01:01:35 -0800
Subject: [PATCH] kfree cleanup: arch

This is the arch/ part of the big kfree cleanup patch.

Remove pointless checks for NULL prior to calling kfree() in arch/.

Signed-off-by: Jesper Juhl
Acked-by: Grant Grundler
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/platforms/pseries/reconfig.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 58c61219d08..d7d40033945 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -286,10 +286,8 @@ static struct property *new_property(const char *name, const int length,
 	return new;
 
 cleanup:
-	if (new->name)
-		kfree(new->name);
-	if (new->value)
-		kfree(new->value);
+	kfree(new->name);
+	kfree(new->value);
 	kfree(new);
 	return NULL;
 }
--
cgit v1.2.3
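The checks are "pointless" because kfree() accepts a NULL pointer and simply returns, so callers never need to guard the call. A minimal illustration (not taken from the patch):

	char *buf = NULL;

	kfree(buf);	/* kfree(NULL) is a no-op, so no NULL check is needed */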