From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@ppc970.osdl.org>
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
 arch/cris/arch-v10/mm/Makefile |   6 +
 arch/cris/arch-v10/mm/fault.c  | 117 ++++++++++++++++++
 arch/cris/arch-v10/mm/init.c   | 264 +++++++++++++++++++++++++++++++++++++++++
 arch/cris/arch-v10/mm/tlb.c    | 248 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 635 insertions(+)
 create mode 100644 arch/cris/arch-v10/mm/Makefile
 create mode 100644 arch/cris/arch-v10/mm/fault.c
 create mode 100644 arch/cris/arch-v10/mm/init.c
 create mode 100644 arch/cris/arch-v10/mm/tlb.c
(limited to 'arch/cris/arch-v10/mm')

diff --git a/arch/cris/arch-v10/mm/Makefile b/arch/cris/arch-v10/mm/Makefile
new file mode 100644
index 00000000000..588b4baee85
--- /dev/null
+++ b/arch/cris/arch-v10/mm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the linux cris-specific parts of the memory manager.
+#
+
+obj-y := fault.o init.o tlb.o
+
diff --git a/arch/cris/arch-v10/mm/fault.c b/arch/cris/arch-v10/mm/fault.c
new file mode 100644
index 00000000000..6805cdb25a5
--- /dev/null
+++ b/arch/cris/arch-v10/mm/fault.c
@@ -0,0 +1,117 @@
+/*
+ *  linux/arch/cris/mm/fault.c
+ *
+ *  Low level bus fault handler
+ *
+ *
+ *  Copyright (C) 2000, 2001  Axis Communications AB
+ *
+ *  Authors:  Bjorn Wesen
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/arch/svinto.h>
+
+/* debug of low-level TLB reload */
+#undef DEBUG
+
+#ifdef DEBUG
+#define D(x) x
+#else
+#define D(x)
+#endif
+
+extern volatile pgd_t *current_pgd;
+
+extern const struct exception_table_entry
+        *search_exception_tables(unsigned long addr);
+
+asmlinkage void do_page_fault(unsigned long address, struct pt_regs *regs,
+                              int protection, int writeaccess);
+
+/* fast TLB-fill fault handler
+ * this is called from entry.S with interrupts disabled
+ */
+
+void
+handle_mmu_bus_fault(struct pt_regs *regs)
+{
+        int cause;
+        int select;
+#ifdef DEBUG
+        int index;
+        int page_id;
+        int acc, inv;
+#endif
+        pgd_t *pgd = (pgd_t *)current_pgd;
+        pmd_t *pmd;
+        pte_t pte;
+        int miss, we, writeac;
+        unsigned long address;
+        unsigned long flags;
+
+        cause = *R_MMU_CAUSE;
+
+        address = cause & PAGE_MASK; /* get faulting address */
+        select = *R_TLB_SELECT;
+
+#ifdef DEBUG
+        page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
+        acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
+        inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
+        index = IO_EXTRACT(R_TLB_SELECT, index, select);
+#endif
+        miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
+        we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause);
+        writeac = IO_EXTRACT(R_MMU_CAUSE, wr_rd, cause);
+
+        D(printk("bus_fault from IRP 0x%lx: addr 0x%lx, miss %d, inv %d, we %d, acc %d, idx %d pid %d\n",
+                 regs->irp, address, miss, inv, we, acc, index, page_id));
+
+        /* leave it to the MM system fault handler */
+        if (miss)
+                do_page_fault(address, regs, 0, writeac);
+        else
+                do_page_fault(address, regs, 1, we);
+
+        /* Reload TLB with new entry to avoid an extra miss exception.
+         * do_page_fault may have flushed the TLB so we have to restore
+         * the MMU registers.
+         */
+        local_save_flags(flags);
+        local_irq_disable();
+        pmd = (pmd_t *)(pgd + pgd_index(address));
+        if (pmd_none(*pmd))
+                return;
+        pte = *pte_offset_kernel(pmd, address);
+        if (!pte_present(pte))
+                return;
+        *R_TLB_SELECT = select;
+        *R_TLB_HI = cause;
+        *R_TLB_LO = pte_val(pte);
+        local_irq_restore(flags);
+}
+
+/* Called from arch/cris/mm/fault.c to find fixup code. */
+int
+find_fixup_code(struct pt_regs *regs)
+{
+        const struct exception_table_entry *fixup;
+
+        if ((fixup = search_exception_tables(regs->irp)) != 0) {
+                /* Adjust the instruction pointer in the stackframe. */
+                regs->irp = fixup->fixup;
+
+                /*
+                 * Don't return by restoring the CPU state, so switch
+                 * frame-type.
+                 */
+                regs->frametype = CRIS_FRAME_NORMAL;
+                return 1;
+        }
+
+        return 0;
+}
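A note on find_fixup_code() above: it builds on the kernel's generic
exception-table mechanism. Every guarded user-space access records a pair of
addresses, the potentially faulting instruction and its recovery code, in a
table sorted by instruction address, and search_exception_tables()
binary-searches that table. A minimal sketch of the lookup, using the 2.6-era
two-word entry layout (an editorial illustration, not part of this patch):

    struct exception_table_entry {
            unsigned long insn;     /* address of the guarded instruction */
            unsigned long fixup;    /* address to resume at after a fault */
    };

    /* Binary search over a table sorted by insn; NULL if no match. */
    static const struct exception_table_entry *
    search_one_table(const struct exception_table_entry *first,
                     const struct exception_table_entry *last,
                     unsigned long addr)
    {
            while (first <= last) {
                    const struct exception_table_entry *mid =
                            first + (last - first) / 2;

                    if (mid->insn < addr)
                            first = mid + 1;
                    else if (mid->insn > addr)
                            last = mid - 1;
                    else
                            return mid;
            }
            return NULL;
    }

On a match, find_fixup_code() points regs->irp at the fixup address, so a
faulting user access returns an error instead of oopsing the kernel.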
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
new file mode 100644
index 00000000000..a9f975a9cfb
--- /dev/null
+++ b/arch/cris/arch-v10/mm/init.c
@@ -0,0 +1,264 @@
+/*
+ *  linux/arch/cris/arch-v10/mm/init.c
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/mmzone.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/mmu.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/arch/svinto.h>
+
+extern void tlb_init(void);
+
+/*
+ * The kernel is already mapped with a kernel segment at kseg_c so
+ * we don't need to map it with a page table. However head.S also
+ * temporarily mapped it at kseg_4 so we should set up the ksegs again,
+ * clear the TLB and do some other paging setup stuff.
+ */
+
+void __init
+paging_init(void)
+{
+        int i;
+        unsigned long zones_size[MAX_NR_ZONES];
+
+        printk("Setting up paging and the MMU.\n");
+
+        /* clear out the init_mm.pgd that will contain the kernel's mappings */
+
+        for (i = 0; i < PTRS_PER_PGD; i++)
+                swapper_pg_dir[i] = __pgd(0);
+
+        /* make sure the current pgd table points to something sane
+         * (even if it is most probably not used until the next
+         * switch_mm)
+         */
+
+        current_pgd = init_mm.pgd;
+
+        /* initialise the TLB (tlb.c) */
+
+        tlb_init();
+
+        /* see README.mm for details on the KSEG setup */
+
+#ifdef CONFIG_CRIS_LOW_MAP
+        /* Etrax-100 LX version 1 has a bug so that we cannot map anything
+         * across the 0x80000000 boundary, so we need to shrink the
+         * user-virtual area to 0x50000000 instead of 0xb0000000 and map
+         * things slightly differently. The unused areas are marked as paged
+         * so that we can catch freak kernel accesses there.
+         *
+         * The ARTPEC chip is mapped at 0xa so we pass that segment straight
+         * through. We cannot vremap it because the vmalloc area is below 0x8
+         * and Juliette needs an uncached area above 0x8.
+         *
+         * Same thing with 0xc and 0x9, which is memory-mapped I/O on some
+         * boards. We map them straight over in LOW_MAP, but use vremap in
+         * LX version 2.
+         */
+
+#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)
+
+        *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  /* bootrom */
+                        IO_STATE(R_MMU_KSEG, seg_e, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_d, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_c, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  /* kernel reg area */
+#ifdef CONFIG_JULIETTE
+                        IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  /* ARTPEC etc. */
+#else
+                        IO_STATE(R_MMU_KSEG, seg_a, page ) |
+#endif
+                        IO_STATE(R_MMU_KSEG, seg_9, seg  ) |  /* LEDs on some boards */
+                        IO_STATE(R_MMU_KSEG, seg_8, seg  ) |  /* CSE0/1, flash and I/O */
+                        IO_STATE(R_MMU_KSEG, seg_7, page ) |  /* kernel vmalloc area */
+                        IO_STATE(R_MMU_KSEG, seg_6, seg  ) |  /* kernel DRAM area */
+                        IO_STATE(R_MMU_KSEG, seg_5, seg  ) |  /* cached flash */
+                        IO_STATE(R_MMU_KSEG, seg_4, page ) |  /* user area */
+                        IO_STATE(R_MMU_KSEG, seg_3, page ) |  /* user area */
+                        IO_STATE(R_MMU_KSEG, seg_2, page ) |  /* user area */
+                        IO_STATE(R_MMU_KSEG, seg_1, page ) |  /* user area */
+                        IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */
+
+        *R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
+#ifdef CONFIG_JULIETTE
+                            IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
+#else
+                            IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
+#endif
+                            IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
+
+        *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
+#else
+        /* This code is for the corrected Etrax-100 LX version 2... */
+
+#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)
+
+        *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  /* cached flash */
+                        IO_STATE(R_MMU_KSEG, seg_e, seg  ) |  /* uncached flash */
+                        IO_STATE(R_MMU_KSEG, seg_d, page ) |  /* vmalloc area */
+                        IO_STATE(R_MMU_KSEG, seg_c, seg  ) |  /* kernel area */
+                        IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  /* kernel reg area */
+                        IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  /* bootrom */
+                        IO_STATE(R_MMU_KSEG, seg_9, page ) |  /* user area */
+                        IO_STATE(R_MMU_KSEG, seg_8, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_7, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_6, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_5, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_4, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_3, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_2, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_1, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_0, page ) );
+
+        *R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
+
+        *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
+#endif
+
+        *R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );
+
+        /* The MMU has been enabled ever since head.S but just to make
+         * it totally obvious we do it here as well.
+         */
+
+        *R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
+                        IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
+                        IO_STATE(R_MMU_CTRL, we_excp,  enable ) );
+
+        *R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);
+
+        /*
+         * initialize the bad page table and bad page to point
+         * to a couple of allocated pages
+         */
+
+        empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+        memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+        /* All pages are DMA'able in Etrax, so put all in the DMA'able zone */
+
+        zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+
+        for (i = 1; i < MAX_NR_ZONES; i++)
+                zones_size[i] = 0;
+
+        /* Use free_area_init_node instead of free_area_init, because the
+         * former is designed for systems where the DRAM starts at an address
+         * substantially higher than 0, like us (we start at PAGE_OFFSET).
+         * This saves space in the mem_map page array.
+         */
+
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            PAGE_OFFSET >> PAGE_SHIFT, 0);
+}
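The KSEG/KBASE register pairs programmed in paging_init() implement a fixed,
page-table-free mapping: each 256 MB segment marked "seg" is mapped linearly
onto a physical base given by a 4-bit nibble in R_MMU_KBASE_HI/LO. A small
helper makes the arithmetic concrete (an editorial sketch, not kernel code):

    /* Translate an address inside a linearly mapped kseg.  The segment's
     * base nibble supplies physical address bits 31..28; the low 28 bits
     * pass straight through.
     */
    static unsigned long kseg_virt_to_phys(unsigned long vaddr,
                                           unsigned int base_nibble)
    {
            return ((unsigned long)base_nibble << 28) |
                   (vaddr & 0x0fffffffUL);
    }

    /* Example: with base_c = 0x4 as programmed above (the non-LOW_MAP
     * case), kseg_virt_to_phys(0xc0001000UL, 0x4) is 0x40001000, i.e.
     * the kernel segment at 0xc0000000 sits directly over DRAM at
     * physical 0x40000000.
     */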
+
+/* Initialize remaps of some I/O-ports. It is important that this
+ * is called before any driver is initialized.
+ */
+
+static int
+__init init_ioremap(void)
+{
+
+        /* Give the external I/O-port addresses their values */
+
+#ifdef CONFIG_CRIS_LOW_MAP
+        /* Simply a linear map (see the KSEG map above in paging_init) */
+        port_cse1_addr = (volatile unsigned long *)(MEM_CSE1_START |
+                                                    MEM_NON_CACHEABLE);
+        port_csp0_addr = (volatile unsigned long *)(MEM_CSP0_START |
+                                                    MEM_NON_CACHEABLE);
+        port_csp4_addr = (volatile unsigned long *)(MEM_CSP4_START |
+                                                    MEM_NON_CACHEABLE);
+#else
+        /* Note that nothing blows up just because we do this remapping;
+         * it's ok even if the ports are not used or connected
+         * to anything (or connected to a non-I/O thing) */
+        port_cse1_addr = (volatile unsigned long *)
+                ioremap((unsigned long)(MEM_CSE1_START | MEM_NON_CACHEABLE), 16);
+        port_csp0_addr = (volatile unsigned long *)
+                ioremap((unsigned long)(MEM_CSP0_START | MEM_NON_CACHEABLE), 16);
+        port_csp4_addr = (volatile unsigned long *)
+                ioremap((unsigned long)(MEM_CSP4_START | MEM_NON_CACHEABLE), 16);
+#endif
+        return 0;
+}
+
+__initcall(init_ioremap);
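Once init_ioremap() has run, drivers reach the external chip-select ports
through these pointers. A hypothetical use (everything except port_csp0_addr
is illustrative):

    /* The volatile-qualified pointer forces a real bus write to the
     * CSP0 port; the compiler cannot cache or elide the store.
     */
    static void example_csp0_write(unsigned long value)
    {
            *port_csp0_addr = value;
    }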
+
+/* Helper function for the two below */
+
+static inline void
+flush_etrax_cacherange(void *startadr, int length)
+{
+        /* CACHED_BOOTROM is mapped to the boot-rom area (cached) which
+         * we can use to get fast dummy-reads of cachelines
+         */
+
+        volatile short *flushadr =
+                (volatile short *)(((unsigned long)startadr & ~PAGE_MASK) |
+                                   CACHED_BOOTROM);
+
+        length = length > 8192 ? 8192 : length; /* No need to flush more than cache size */
+
+        while (length > 0) {
+                *flushadr;                        /* dummy read to flush */
+                flushadr += (32 / sizeof(short)); /* a cacheline is 32 bytes */
+                length -= 32;
+        }
+}
+
+/* Due to a bug in all versions of the Etrax100(LX), receiving DMA buffers
+ * will occasionally corrupt certain CPU writes if the DMA buffers
+ * happen to be hot in the cache.
+ *
+ * As a workaround, we have to flush the relevant parts of the cache
+ * before (re)inserting any receiving descriptor into the DMA HW.
+ */
+
+void
+prepare_rx_descriptor(struct etrax_dma_descr *desc)
+{
+        flush_etrax_cacherange((void *)desc->buf,
+                               desc->sw_len ? desc->sw_len : 65536);
+}
+
+/* Do the same thing but flush the entire cache */
+
+void
+flush_etrax_cache(void)
+{
+        flush_etrax_cacherange(0, 8192);
+}
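The cache workaround above is meant to be applied every time a receive
descriptor is (re)armed. A hypothetical receive-path fragment (the field
names follow struct etrax_dma_descr as used above; the surrounding driver
logic is illustrative):

    static void example_rearm_rx(struct etrax_dma_descr *desc,
                                 unsigned long buf_phys, int len)
    {
            desc->sw_len = len;     /* 0 is treated as 65536 above */
            desc->buf = buf_phys;

            /* Flush the buffer out of the cache before the DMA engine
             * can touch it, per the Etrax100 erratum described above.
             */
            prepare_rx_descriptor(desc);

            /* ... then hand the descriptor back to the DMA channel ... */
    }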
diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c
new file mode 100644
index 00000000000..9d06125ff5a
--- /dev/null
+++ b/arch/cris/arch-v10/mm/tlb.c
@@ -0,0 +1,248 @@
+/*
+ *  linux/arch/cris/arch-v10/mm/tlb.c
+ *
+ *  Low level TLB handling
+ *
+ *
+ *  Copyright (C) 2000-2002  Axis Communications AB
+ *
+ *  Authors:  Bjorn Wesen (bjornw@axis.com)
+ *
+ */
+
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/arch/svinto.h>
+
+#define D(x)
+
+/* The TLB can host up to 64 different mm contexts at the same time.
+ * The running context is R_MMU_CONTEXT, and each TLB entry contains a
+ * page_id that has to match to give a hit. In page_id_map, we keep track
+ * of which mm's we have assigned which page_id's, so that we know when
+ * to invalidate TLB entries.
+ *
+ * The last page_id is never running - it is used as an invalid page_id
+ * so we can make TLB entries that will never match.
+ *
+ * Notice that we need to make the flushes atomic, otherwise an interrupt
+ * handler that uses vmalloced memory might cause a TLB load in the middle
+ * of a flush.
+ */
+
+/* invalidate all TLB entries */
+
+void
+flush_tlb_all(void)
+{
+        int i;
+        unsigned long flags;
+
+        /* the vpn of i & 0xf is so we don't write similar TLB entries
+         * in the same 4-way entry group (see the note after this
+         * function for details)
+         */
+
+        local_save_flags(flags);
+        local_irq_disable();
+        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
+                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                              IO_FIELD(R_TLB_HI, vpn,     i & 0xf        ) );
+
+                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                              IO_STATE(R_TLB_LO, valid,  no ) |
+                              IO_STATE(R_TLB_LO, kernel, no ) |
+                              IO_STATE(R_TLB_LO, we,     no ) |
+                              IO_FIELD(R_TLB_LO, pfn,    0  ) );
+        }
+        local_irq_restore(flags);
+        D(printk("tlb: flushed all\n"));
+}
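About the "vpn, i & 0xf" idiom in flush_tlb_all() (it recurs in the flush
routines below): the 64-entry TLB is organized in 4-way groups, so there are
16 groups, selected by the low four bits of the vpn. Writing invalidated
entry i with vpn i & 0xf spreads the dummy entries over all sixteen groups
instead of piling four identical entries into one. As a sketch of the
indexing (editorial illustration):

    /* For a 64-entry TLB in 4-way groups: 16 groups of 4 ways each. */
    static inline int tlb_group(unsigned long vpn)
    {
            return vpn & 0xf;       /* group select */
    }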
+
+/* invalidate the selected mm context only */
+
+void
+flush_tlb_mm(struct mm_struct *mm)
+{
+        int i;
+        int page_id = mm->context.page_id;
+        unsigned long flags;
+
+        D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));
+
+        if (page_id == NO_CONTEXT)
+                return;
+
+        /* mark the TLB entries that match the page_id as invalid.
+         * here we could also check the _PAGE_GLOBAL bit and NOT flush
+         * global pages. is it worth the extra I/O?
+         */
+
+        local_save_flags(flags);
+        local_irq_disable();
+        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
+                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
+                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf        ) );
+
+                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                                      IO_STATE(R_TLB_LO, valid,  no ) |
+                                      IO_STATE(R_TLB_LO, kernel, no ) |
+                                      IO_STATE(R_TLB_LO, we,     no ) |
+                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
+                }
+        }
+        local_irq_restore(flags);
+}
+
+/* invalidate a single page */
+
+void
+flush_tlb_page(struct vm_area_struct *vma,
+               unsigned long addr)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        int page_id = mm->context.page_id;
+        int i;
+        unsigned long flags;
+
+        D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm));
+
+        if (page_id == NO_CONTEXT)
+                return;
+
+        addr &= PAGE_MASK; /* perhaps not necessary */
+
+        /* invalidate those TLB entries that match both the mm context
+         * and the virtual address requested
+         */
+
+        local_save_flags(flags);
+        local_irq_disable();
+        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+                unsigned long tlb_hi;
+                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
+                tlb_hi = *R_TLB_HI;
+                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
+                    (tlb_hi & PAGE_MASK) == addr) {
+                        *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                                    addr; /* same addr as before works. */
+
+                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                                      IO_STATE(R_TLB_LO, valid,  no ) |
+                                      IO_STATE(R_TLB_LO, kernel, no ) |
+                                      IO_STATE(R_TLB_LO, we,     no ) |
+                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
+                }
+        }
+        local_irq_restore(flags);
+}
+
+/* invalidate a page range */
+
+void
+flush_tlb_range(struct vm_area_struct *vma,
+                unsigned long start,
+                unsigned long end)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        int page_id = mm->context.page_id;
+        int i;
+        unsigned long flags;
+
+        D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
+                 start, end, page_id, mm));
+
+        if (page_id == NO_CONTEXT)
+                return;
+
+        start &= PAGE_MASK; /* probably not necessary */
+        end &= PAGE_MASK;   /* ditto */
+
+        /* invalidate those TLB entries that match both the mm context
+         * and the virtual address range
+         */
+
+        local_save_flags(flags);
+        local_irq_disable();
+        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+                unsigned long tlb_hi, vpn;
+                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
+                tlb_hi = *R_TLB_HI;
+                vpn = tlb_hi & PAGE_MASK;
+                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
+                    vpn >= start && vpn < end) {
+                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf        ) );
+
+                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                                      IO_STATE(R_TLB_LO, valid,  no ) |
+                                      IO_STATE(R_TLB_LO, kernel, no ) |
+                                      IO_STATE(R_TLB_LO, we,     no ) |
+                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
+                }
+        }
+        local_irq_restore(flags);
+}
+
+/* dump the entire TLB for debug purposes */
+
+#if 0
+void
+dump_tlb_all(void)
+{
+        int i;
+        unsigned long flags;
+
+        printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");
+
+        local_save_flags(flags);
+        local_irq_disable();
+        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
+                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
+                printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
+                       i, *R_TLB_HI, *R_TLB_LO);
+        }
+        local_irq_restore(flags);
+}
+#endif
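get_mmu_context(), used by switch_mm() below, is not part of this patch; it
lives in the shared arch/cris/mm code. The comment at the top of tlb.c
describes the scheme: up to 64 page_ids tracked in page_id_map, with the last
one reserved as INVALID_PAGEID. A rough sketch of such an allocator follows,
as a hypothetical simplification (page_id_map is assumed to be an array of mm
pointers; the real implementation may recycle ids differently, e.g. by
stealing one from an old mm):

    static int next_free_page_id;   /* hypothetical allocation cursor */

    static void get_mmu_context_sketch(struct mm_struct *mm)
    {
            int i;

            if (mm->context.page_id != NO_CONTEXT)
                    return;                 /* mm already has a page_id */

            if (next_free_page_id == INVALID_PAGEID) {
                    /* out of ids: take every id back, drop all stale
                     * translations, and start handing out ids afresh
                     */
                    for (i = 0; i < INVALID_PAGEID; i++)
                            if (page_id_map[i])
                                    page_id_map[i]->context.page_id = NO_CONTEXT;
                    flush_tlb_all();
                    next_free_page_id = 0;
            }

            mm->context.page_id = next_free_page_id++;
            page_id_map[mm->context.page_id] = mm;
    }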
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+        mm->context.page_id = NO_CONTEXT;
+        return 0;
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+          struct task_struct *tsk)
+{
+        /* make sure we have a context */
+
+        get_mmu_context(next);
+
+        /* remember the pgd for the fault handlers.
+         * this is similar to the pgd register in some other CPUs.
+         * we need our own copy of it because current and active_mm
+         * might be invalid at points where we still need to dereference
+         * the pgd.
+         */
+
+        current_pgd = next->pgd;
+
+        /* switch context in the MMU */
+
+        D(printk("switching mmu_context to %d (%p)\n", next->context, next));
+
+        *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
+}
--
cgit v1.2.3