author     David Woodhouse <dwmw2@shinybook.infradead.org>   2005-08-09 16:51:35 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>   2005-08-09 16:51:35 +0100
commit     c973b112c76c9d8fd042991128f218a738cc8d0a (patch)
tree       e813b0da5d0a0e19e06de6462d145a29ad683026 /arch/ppc64/mm/stab.c
parent     c5fbc3966f48279dbebfde10248c977014aa9988 (diff)
parent     00dd1e433967872f3997a45d5adf35056fdf2f56 (diff)
Merge with /shiny/git/linux-2.6/.git
Diffstat (limited to 'arch/ppc64/mm/stab.c')
-rw-r--r--  arch/ppc64/mm/stab.c | 35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index df4bbe14153..1b83f002bf2 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -18,6 +18,8 @@
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
 #include <asm/cputable.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
 
 struct stab_entry {
 	unsigned long esid_data;
@@ -224,6 +226,39 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 extern void slb_initialize(void);
 
 /*
+ * Allocate segment tables for secondary CPUs. These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void stabs_alloc(void)
+{
+	int cpu;
+
+	if (cpu_has_feature(CPU_FTR_SLB))
+		return;
+
+	for_each_cpu(cpu) {
+		unsigned long newstab;
+
+		if (cpu == 0)
+			continue; /* stab for CPU 0 is statically allocated */
+
+		newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+		if (! newstab)
+			panic("Unable to allocate segment table for CPU %d.\n",
+			      cpu);
+
+		newstab += KERNELBASE;
+
+		memset((void *)newstab, 0, PAGE_SIZE);
+
+		paca[cpu].stab_addr = newstab;
+		paca[cpu].stab_real = virt_to_abs(newstab);
+		printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+	}
+}
+
+/*
  * Build an entry for the base kernel segment and put it into
  * the segment table or SLB. All other segment table or SLB
  * entries are faulted in.
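For reference, the patch caps each per-CPU segment table allocation at 1 << SID_SHIFT so that the table lands in the first, bolted kernel segment, and then derives the kernel virtual address by adding KERNELBASE. The stand-alone sketch below walks through that address arithmetic; the SID_SHIFT, KERNELBASE, and PAGE_SIZE values are assumptions for illustration (typical for ppc64 of this era, not taken from the patch), and the flat "virt = phys + KERNELBASE" relationship stands in for what lmb_alloc_base() and virt_to_abs() do inside the kernel.

```c
/*
 * Stand-alone sketch (not kernel code) of the address arithmetic in
 * stabs_alloc().  SID_SHIFT, KERNELBASE and PAGE_SIZE are assumed
 * values for illustration only.
 */
#include <stdio.h>

#define SID_SHIFT   28                       /* assumed: 256 MB segments */
#define KERNELBASE  0xC000000000000000UL     /* assumed ppc64 linear-map base */
#define PAGE_SIZE   4096UL

int main(void)
{
	/* lmb_alloc_base() is given 1 << SID_SHIFT as the upper bound, so the
	 * table stays inside the first (bolted) kernel segment. */
	unsigned long limit = 1UL << SID_SHIFT;
	unsigned long phys  = limit - PAGE_SIZE;   /* pretend allocation result */

	unsigned long virt = phys + KERNELBASE;    /* what the patch stores in stab_addr */
	unsigned long abs  = virt - KERNELBASE;    /* what virt_to_abs() recovers here */

	printf("segment limit 0x%lx\n", limit);
	printf("stab: phys 0x%lx, virt 0x%lx, abs 0x%lx\n", phys, virt, abs);
	return 0;
}
```

The point of the upper bound is the one stated in the patch comment: a table placed below that limit is covered by the segment bolted in at boot, so do_stab_bolted can service a fault on the segment table itself without triggering a further segment miss.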