From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@ppc970.osdl.org>
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
 arch/i386/kernel/cpu/mtrr/Makefile  |   5 +
 arch/i386/kernel/cpu/mtrr/amd.c     | 121 +++++++
 arch/i386/kernel/cpu/mtrr/centaur.c | 223 ++++++++++++
 arch/i386/kernel/cpu/mtrr/changelog | 229 ++++++++++++
 arch/i386/kernel/cpu/mtrr/cyrix.c   | 364 +++++++++++++++++++
 arch/i386/kernel/cpu/mtrr/generic.c | 417 ++++++++++++++++++++++
 arch/i386/kernel/cpu/mtrr/if.c      | 374 +++++++++++++++++++
 arch/i386/kernel/cpu/mtrr/main.c    | 693 ++++++++++++++++++++++++++++++++++++
 arch/i386/kernel/cpu/mtrr/mtrr.h    |  98 +++++
 arch/i386/kernel/cpu/mtrr/state.c   |  78 ++++
 10 files changed, 2602 insertions(+)
 create mode 100644 arch/i386/kernel/cpu/mtrr/Makefile
 create mode 100644 arch/i386/kernel/cpu/mtrr/amd.c
 create mode 100644 arch/i386/kernel/cpu/mtrr/centaur.c
 create mode 100644 arch/i386/kernel/cpu/mtrr/changelog
 create mode 100644 arch/i386/kernel/cpu/mtrr/cyrix.c
 create mode 100644 arch/i386/kernel/cpu/mtrr/generic.c
 create mode 100644 arch/i386/kernel/cpu/mtrr/if.c
 create mode 100644 arch/i386/kernel/cpu/mtrr/main.c
 create mode 100644 arch/i386/kernel/cpu/mtrr/mtrr.h
 create mode 100644 arch/i386/kernel/cpu/mtrr/state.c

diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
new file mode 100644
index 00000000000..a25b701ab84
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/Makefile
@@ -0,0 +1,5 @@
+obj-y		:= main.o if.o generic.o state.o
+obj-y		+= amd.o
+obj-y		+= cyrix.o
+obj-y		+= centaur.o
+
diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c
new file mode 100644
index 00000000000..1a1e04b6fd0
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/amd.c
@@ -0,0 +1,121 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+
+#include "mtrr.h"
+
+static void
+amd_get_mtrr(unsigned int reg, unsigned long *base,
+	     unsigned int *size, mtrr_type * type)
+{
+	unsigned long low, high;
+
+	rdmsr(MSR_K6_UWCCR, low, high);
+	/* Upper dword is region 1, lower is region 0 */
+	if (reg == 1)
+		low = high;
+	/* The base lives in bits 31:17, so masking yields the
+	   128K-aligned base address */
+	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
+	*type = 0;
+	if (low & 1)
+		*type = MTRR_TYPE_UNCACHABLE;
+	if (low & 2)
+		*type = MTRR_TYPE_WRCOMB;
+	if (!(low & 3)) {
+		*size = 0;
+		return;
+	}
+	/*
+	 * This needs a little explaining. The size is stored as an
+	 * inverted mask of bits of 128K granularity, 15 bits long,
+	 * offset 2 bits.
+	 *
+	 * So to get a size we invert the mask and add 1 to the lowest
+	 * mask bit (4, as it's 2 bits in). This gives us a size we
+	 * then shift to turn into 128K blocks.
+	 *
+	 * e.g.		 111 1111 1111 1100	 is 512K
+	 *
+	 * invert	 000 0000 0000 0011
+	 * +1		 000 0000 0000 0100
+	 * *128K	 ...
+	 */
+	low = (~low) & 0x1FFFC;
+	*size = (low + 4) << (15 - PAGE_SHIFT);
+	return;
+}
+
+static void amd_set_mtrr(unsigned int reg, unsigned long base,
+			 unsigned long size, mtrr_type type)
+/*  [SUMMARY] Set variable MTRR register on the local CPU.
+    <reg> The register to set.
+    <base> The base address of the region.
+    <size> The size of the region. If this is 0 the region is disabled.
+    <type> The type of the region.
+    [RETURNS] Nothing.
+*/
+{
+	u32 regs[2];
+
+	/*
+	 * Low is MTRR0, high is MTRR1
+	 */
+	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
+	/*
+	 * Blank to disable
+	 */
+	if (size == 0)
+		regs[reg] = 0;
+	else
+		/* Set the register to the base, the type (off by one) and an
+		   inverted bitmask of the size. The size is the only odd
+		   bit. Say we are fed 512K: we invert this and get 111 1111
+		   1111 1011, but if you subtract one and then invert you get
+		   the desired 111 1111 1111 1100 mask.
+
+		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
+		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
+		    | (base << PAGE_SHIFT) | (type + 1);
+
+	/*
+	 * The writeback rule is quite specific. See the manual. It is:
+	 * disable local interrupts, write back the cache, set the MTRR.
+	 */
+	wbinvd();
+	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
+}
+
+static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+{
+	/* Apply the K6 block alignment and size rules.
+	   In order:
+	   o Uncached or gathering only
+	   o 128K or bigger block
+	   o Power of 2 block
+	   o base suitably aligned to the power
+	 */
+	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
+	    || (size & ~(size - 1)) - size || (base & (size - 1)))
+		return -EINVAL;
+	return 0;
+}
+
+static struct mtrr_ops amd_mtrr_ops = {
+	.vendor            = X86_VENDOR_AMD,
+	.set               = amd_set_mtrr,
+	.get               = amd_get_mtrr,
+	.get_free_region   = generic_get_free_region,
+	.validate_add_page = amd_validate_add_page,
+	.have_wrcomb       = positive_have_wrcomb,
+};
+
+int __init amd_init_mtrr(void)
+{
+	set_mtrr_ops(&amd_mtrr_ops);
+	return 0;
+}
+
+//arch_initcall(amd_mtrr_init);
diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c
new file mode 100644
index 00000000000..33f00ac314e
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/centaur.c
@@ -0,0 +1,223 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+#include "mtrr.h"
+
+static struct {
+	unsigned long high;
+	unsigned long low;
+} centaur_mcr[8];
+
+static u8 centaur_mcr_reserved;
+static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */
+
+/*
+ * Report boot time MCR setups
+ */
+
+static int
+centaur_get_free_region(unsigned long base, unsigned long size)
+/*  [SUMMARY] Get a free MTRR.
+    <base> The starting (base) address of the region.
+    <size> The size (in bytes) of the region.
+    [RETURNS] The index of the region on success, else -ENOSPC on error.
+*/ +{ + int i, max; + mtrr_type ltype; + unsigned long lbase; + unsigned int lsize; + + max = num_var_ranges; + for (i = 0; i < max; ++i) { + if (centaur_mcr_reserved & (1 << i)) + continue; + mtrr_if->get(i, &lbase, &lsize, <ype); + if (lsize == 0) + return i; + } + return -ENOSPC; +} + +void +mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) +{ + centaur_mcr[mcr].low = lo; + centaur_mcr[mcr].high = hi; +} + +static void +centaur_get_mcr(unsigned int reg, unsigned long *base, + unsigned int *size, mtrr_type * type) +{ + *base = centaur_mcr[reg].high >> PAGE_SHIFT; + *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; + *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ + if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) + *type = MTRR_TYPE_UNCACHABLE; + if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) + *type = MTRR_TYPE_WRBACK; + if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) + *type = MTRR_TYPE_WRBACK; + +} + +static void centaur_set_mcr(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type) +{ + unsigned long low, high; + + if (size == 0) { + /* Disable */ + high = low = 0; + } else { + high = base << PAGE_SHIFT; + if (centaur_mcr_type == 0) + low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ + else { + if (type == MTRR_TYPE_UNCACHABLE) + low = -size << PAGE_SHIFT | 0x02; /* NC */ + else + low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ + } + } + centaur_mcr[reg].high = high; + centaur_mcr[reg].low = low; + wrmsr(MSR_IDT_MCR0 + reg, low, high); +} + +#if 0 +/* + * Initialise the later (saner) Winchip MCR variant. In this version + * the BIOS can pass us the registers it has used (but not their values) + * and the control register is read/write + */ + +static void __init +centaur_mcr1_init(void) +{ + unsigned i; + u32 lo, hi; + + /* Unfortunately, MCR's are read-only, so there is no way to + * find out what the bios might have done. + */ + + rdmsr(MSR_IDT_MCR_CTRL, lo, hi); + if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */ + lo &= ~0x1C0; /* clear key */ + lo |= 0x040; /* set key to 1 */ + wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */ + } + + centaur_mcr_type = 1; + + /* + * Clear any unconfigured MCR's. + */ + + for (i = 0; i < 8; ++i) { + if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) { + if (!(lo & (1 << (9 + i)))) + wrmsr(MSR_IDT_MCR0 + i, 0, 0); + else + /* + * If the BIOS set up an MCR we cannot see it + * but we don't wish to obliterate it + */ + centaur_mcr_reserved |= (1 << i); + } + } + /* + * Throw the main write-combining switch... + * However if OOSTORE is enabled then people have already done far + * cleverer things and we should behave. + */ + + lo |= 15; /* Write combine enables */ + wrmsr(MSR_IDT_MCR_CTRL, lo, hi); +} + +/* + * Initialise the original winchip with read only MCR registers + * no used bitmask for the BIOS to pass on and write only control + */ + +static void __init +centaur_mcr0_init(void) +{ + unsigned i; + + /* Unfortunately, MCR's are read-only, so there is no way to + * find out what the bios might have done. + */ + + /* Clear any unconfigured MCR's. + * This way we are sure that the centaur_mcr array contains the actual + * values. The disadvantage is that any BIOS tweaks are thus undone. 
+ * + */ + for (i = 0; i < 8; ++i) { + if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) + wrmsr(MSR_IDT_MCR0 + i, 0, 0); + } + + wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */ +} + +/* + * Initialise Winchip series MCR registers + */ + +static void __init +centaur_mcr_init(void) +{ + struct set_mtrr_context ctxt; + + set_mtrr_prepare_save(&ctxt); + set_mtrr_cache_disable(&ctxt); + + if (boot_cpu_data.x86_model == 4) + centaur_mcr0_init(); + else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9) + centaur_mcr1_init(); + + set_mtrr_done(&ctxt); +} +#endif + +static int centaur_validate_add_page(unsigned long base, + unsigned long size, unsigned int type) +{ + /* + * FIXME: Winchip2 supports uncached + */ + if (type != MTRR_TYPE_WRCOMB && + (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { + printk(KERN_WARNING + "mtrr: only write-combining%s supported\n", + centaur_mcr_type ? " and uncacheable are" + : " is"); + return -EINVAL; + } + return 0; +} + +static struct mtrr_ops centaur_mtrr_ops = { + .vendor = X86_VENDOR_CENTAUR, +// .init = centaur_mcr_init, + .set = centaur_set_mcr, + .get = centaur_get_mcr, + .get_free_region = centaur_get_free_region, + .validate_add_page = centaur_validate_add_page, + .have_wrcomb = positive_have_wrcomb, +}; + +int __init centaur_init_mtrr(void) +{ + set_mtrr_ops(¢aur_mtrr_ops); + return 0; +} + +//arch_initcall(centaur_init_mtrr); diff --git a/arch/i386/kernel/cpu/mtrr/changelog b/arch/i386/kernel/cpu/mtrr/changelog new file mode 100644 index 00000000000..af136853595 --- /dev/null +++ b/arch/i386/kernel/cpu/mtrr/changelog @@ -0,0 +1,229 @@ + ChangeLog + + Prehistory Martin Tischhäuser + Initial register-setting code (from proform-1.0). + 19971216 Richard Gooch + Original version for /proc/mtrr interface, SMP-safe. + v1.0 + 19971217 Richard Gooch + Bug fix for ioctls()'s. + Added sample code in Documentation/mtrr.txt + v1.1 + 19971218 Richard Gooch + Disallow overlapping regions. + 19971219 Jens Maurer + Register-setting fixups. + v1.2 + 19971222 Richard Gooch + Fixups for kernel 2.1.75. + v1.3 + 19971229 David Wragg + Register-setting fixups and conformity with Intel conventions. + 19971229 Richard Gooch + Cosmetic changes and wrote this ChangeLog ;-) + 19980106 Richard Gooch + Fixups for kernel 2.1.78. + v1.4 + 19980119 David Wragg + Included passive-release enable code (elsewhere in PCI setup). + v1.5 + 19980131 Richard Gooch + Replaced global kernel lock with private spinlock. + v1.6 + 19980201 Richard Gooch + Added wait for other CPUs to complete changes. + v1.7 + 19980202 Richard Gooch + Bug fix in definition of for UP. + v1.8 + 19980319 Richard Gooch + Fixups for kernel 2.1.90. + 19980323 Richard Gooch + Move SMP BIOS fixup before secondary CPUs call + v1.9 + 19980325 Richard Gooch + Fixed test for overlapping regions: confused by adjacent regions + 19980326 Richard Gooch + Added wbinvd in . + 19980401 Richard Gooch + Bug fix for non-SMP compilation. + 19980418 David Wragg + Fixed-MTRR synchronisation for SMP and use atomic operations + instead of spinlocks. + 19980418 Richard Gooch + Differentiate different MTRR register classes for BIOS fixup. + v1.10 + 19980419 David Wragg + Bug fix in variable MTRR synchronisation. + v1.11 + 19980419 Richard Gooch + Fixups for kernel 2.1.97. + v1.12 + 19980421 Richard Gooch + Safer synchronisation across CPUs when changing MTRRs. + v1.13 + 19980423 Richard Gooch + Bugfix for SMP systems without MTRR support. 
+ v1.14 + 19980427 Richard Gooch + Trap calls to and on non-MTRR machines. + v1.15 + 19980427 Richard Gooch + Use atomic bitops for setting SMP change mask. + v1.16 + 19980428 Richard Gooch + Removed spurious diagnostic message. + v1.17 + 19980429 Richard Gooch + Moved register-setting macros into this file. + Moved setup code from init/main.c to i386-specific areas. + v1.18 + 19980502 Richard Gooch + Moved MTRR detection outside conditionals in . + v1.19 + 19980502 Richard Gooch + Documentation improvement: mention Pentium II and AGP. + v1.20 + 19980521 Richard Gooch + Only manipulate interrupt enable flag on local CPU. + Allow enclosed uncachable regions. + v1.21 + 19980611 Richard Gooch + Always define . + v1.22 + 19980901 Richard Gooch + Removed module support in order to tidy up code. + Added sanity check for / before . + Created addition queue for prior to SMP commence. + v1.23 + 19980902 Richard Gooch + Ported patch to kernel 2.1.120-pre3. + v1.24 + 19980910 Richard Gooch + Removed sanity checks and addition queue: Linus prefers an OOPS. + v1.25 + 19981001 Richard Gooch + Fixed harmless compiler warning in include/asm-i386/mtrr.h + Fixed version numbering and history for v1.23 -> v1.24. + v1.26 + 19990118 Richard Gooch + Added devfs support. + v1.27 + 19990123 Richard Gooch + Changed locking to spin with reschedule. + Made use of new . + v1.28 + 19990201 Zoltán Böszörményi + Extended the driver to be able to use Cyrix style ARRs. + 19990204 Richard Gooch + Restructured Cyrix support. + v1.29 + 19990204 Zoltán Böszörményi + Refined ARR support: enable MAPEN in set_mtrr_prepare() + and disable MAPEN in set_mtrr_done(). + 19990205 Richard Gooch + Minor cleanups. + v1.30 + 19990208 Zoltán Böszörményi + Protect plain 6x86s (and other processors without the + Page Global Enable feature) against accessing CR4 in + set_mtrr_prepare() and set_mtrr_done(). + 19990210 Richard Gooch + Turned and into function pointers. + v1.31 + 19990212 Zoltán Böszörményi + Major rewrite of cyrix_arr_init(): do not touch ARRs, + leave them as the BIOS have set them up. + Enable usage of all 8 ARRs. + Avoid multiplications by 3 everywhere and other + code clean ups/speed ups. + 19990213 Zoltán Böszörményi + Set up other Cyrix processors identical to the boot cpu. + Since Cyrix don't support Intel APIC, this is l'art pour l'art. + Weigh ARRs by size: + If size <= 32M is given, set up ARR# we were given. + If size > 32M is given, set up ARR7 only if it is free, + fail otherwise. + 19990214 Zoltán Böszörményi + Also check for size >= 256K if we are to set up ARR7, + mtrr_add() returns the value it gets from set_mtrr() + 19990218 Zoltán Böszörményi + Remove Cyrix "coma bug" workaround from here. + Moved to linux/arch/i386/kernel/setup.c and + linux/include/asm-i386/bugs.h + 19990228 Richard Gooch + Added MTRRIOC_KILL_ENTRY ioctl(2) + Trap for counter underflow in . + Trap for 4 MiB aligned regions for PPro, stepping <= 7. + 19990301 Richard Gooch + Created hook. + 19990305 Richard Gooch + Temporarily disable AMD support now MTRR capability flag is set. + v1.32 + 19990308 Zoltán Böszörményi + Adjust my changes (19990212-19990218) to Richard Gooch's + latest changes. (19990228-19990305) + v1.33 + 19990309 Richard Gooch + Fixed typo in message. + 19990310 Richard Gooch + Support K6-II/III based on Alan Cox's patches. + v1.34 + 19990511 Bart Hartgers + Support Centaur C6 MCR's. + 19990512 Richard Gooch + Minor cleanups. 
  v1.35
+  19990707   Zoltán Böszörményi
+	     Check whether ARR3 is protected in cyrix_get_free_region()
+	     and mtrr_del(). The code won't attempt to delete or change it
+	     from now on if the BIOS protected ARR3. It silently skips ARR3
+	     in cyrix_get_free_region() or returns with an error code from
+	     mtrr_del().
+  19990711   Zoltán Böszörményi
+	     Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
+	     if ARR3 isn't protected. This is needed because if SMM is active
+	     and ARR3 isn't protected then deleting and setting ARR3 again
+	     may lock up the processor. With SMM entirely disabled, it does
+	     not happen.
+  19990812   Zoltán Böszörményi
+	     Rearrange switch() statements so the driver accommodates the
+	     fact that the AMD Athlon handles its MTRRs the same way
+	     as Intel does.
+  19990814   Zoltán Böszörményi
+	     Double check for Intel in mtrr_add()'s big switch() because
+	     that revision check is only valid for Intel CPUs.
+  19990819   Alan Cox
+	     Tested Zoltan's changes on a pre-production Athlon - 100%
+	     success.
+  19991008   Manfred Spraul
+	     Replaced spin_lock_reschedule() with a normal semaphore.
+  v1.36
+  20000221   Richard Gooch
+	     Compile fix if procfs and devfs not enabled.
+	     Formatting changes.
+  v1.37
+  20001109   H. Peter Anvin
+	     Use the new centralized CPU feature detects.
+
+  v1.38
+  20010309   Dave Jones
+	     Add support for Cyrix III.
+
+  v1.39
+  20010312   Dave Jones
+	     Ugh, I broke AMD support.
+	     Reworked fix by Troels Walsted Hansen
+
+  v1.40
+  20010327   Dave Jones
+	     Adapted Cyrix III support to include VIA C3.
+
+  v2.0
+  20020306   Patrick Mochel
+	     Split mtrr.c -> mtrr/*.c
+	     Converted to Linux Kernel Coding Style
+	     Fixed several minor nits in form
+	     Moved some SMP-only functions out, so they can be used
+	     for power management in the future.
+	     TODO: Fix user interface cruft.
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
new file mode 100644
index 00000000000..933b0dd62f4
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -0,0 +1,364 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include "mtrr.h"
+
+int arr3_protected;
+
+static void
+cyrix_get_arr(unsigned int reg, unsigned long *base,
+	      unsigned int *size, mtrr_type * type)
+{
+	unsigned long flags;
+	unsigned char arr, ccr3, rcr, shift;
+
+	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
+
+	/* Save flags and disable interrupts */
+	local_irq_save(flags);
+
+	ccr3 = getCx86(CX86_CCR3);
+	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
+	((unsigned char *) base)[3] = getCx86(arr);
+	((unsigned char *) base)[2] = getCx86(arr + 1);
+	((unsigned char *) base)[1] = getCx86(arr + 2);
+	rcr = getCx86(CX86_RCR_BASE + reg);
+	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
+
+	/* Re-enable interrupts if they were enabled previously */
+	local_irq_restore(flags);
+	shift = ((unsigned char *) base)[1] & 0x0f;
+	*base >>= PAGE_SHIFT;
+
+	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
+	 * Note: shift==0xf means 4G, this is unsupported.
+	 */
+	if (shift)
+		*size = (reg < 7 ?
0x1UL : 0x40UL) << (shift - 1); + else + *size = 0; + + /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */ + if (reg < 7) { + switch (rcr) { + case 1: + *type = MTRR_TYPE_UNCACHABLE; + break; + case 8: + *type = MTRR_TYPE_WRBACK; + break; + case 9: + *type = MTRR_TYPE_WRCOMB; + break; + case 24: + default: + *type = MTRR_TYPE_WRTHROUGH; + break; + } + } else { + switch (rcr) { + case 0: + *type = MTRR_TYPE_UNCACHABLE; + break; + case 8: + *type = MTRR_TYPE_WRCOMB; + break; + case 9: + *type = MTRR_TYPE_WRBACK; + break; + case 25: + default: + *type = MTRR_TYPE_WRTHROUGH; + break; + } + } +} + +static int +cyrix_get_free_region(unsigned long base, unsigned long size) +/* [SUMMARY] Get a free ARR. + The starting (base) address of the region. + The size (in bytes) of the region. + [RETURNS] The index of the region on success, else -1 on error. +*/ +{ + int i; + mtrr_type ltype; + unsigned long lbase; + unsigned int lsize; + + /* If we are to set up a region >32M then look at ARR7 immediately */ + if (size > 0x2000) { + cyrix_get_arr(7, &lbase, &lsize, <ype); + if (lsize == 0) + return 7; + /* Else try ARR0-ARR6 first */ + } else { + for (i = 0; i < 7; i++) { + cyrix_get_arr(i, &lbase, &lsize, <ype); + if ((i == 3) && arr3_protected) + continue; + if (lsize == 0) + return i; + } + /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ + cyrix_get_arr(i, &lbase, &lsize, <ype); + if ((lsize == 0) && (size >= 0x40)) + return i; + } + return -ENOSPC; +} + +static u32 cr4 = 0; +static u32 ccr3; + +static void prepare_set(void) +{ + u32 cr0; + + /* Save value of CR4 and clear Page Global Enable (bit 7) */ + if ( cpu_has_pge ) { + cr4 = read_cr4(); + write_cr4(cr4 & (unsigned char) ~(1 << 7)); + } + + /* Disable and flush caches. 
Note that wbinvd flushes the TLBs as + a side-effect */ + cr0 = read_cr0() | 0x40000000; + wbinvd(); + write_cr0(cr0); + wbinvd(); + + /* Cyrix ARRs - everything else were excluded at the top */ + ccr3 = getCx86(CX86_CCR3); + + /* Cyrix ARRs - everything else were excluded at the top */ + setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); + +} + +static void post_set(void) +{ + /* Flush caches and TLBs */ + wbinvd(); + + /* Cyrix ARRs - everything else was excluded at the top */ + setCx86(CX86_CCR3, ccr3); + + /* Enable caches */ + write_cr0(read_cr0() & 0xbfffffff); + + /* Restore value of CR4 */ + if ( cpu_has_pge ) + write_cr4(cr4); +} + +static void cyrix_set_arr(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type) +{ + unsigned char arr, arr_type, arr_size; + + arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ + + /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */ + if (reg >= 7) + size >>= 6; + + size &= 0x7fff; /* make sure arr_size <= 14 */ + for (arr_size = 0; size; arr_size++, size >>= 1) ; + + if (reg < 7) { + switch (type) { + case MTRR_TYPE_UNCACHABLE: + arr_type = 1; + break; + case MTRR_TYPE_WRCOMB: + arr_type = 9; + break; + case MTRR_TYPE_WRTHROUGH: + arr_type = 24; + break; + default: + arr_type = 8; + break; + } + } else { + switch (type) { + case MTRR_TYPE_UNCACHABLE: + arr_type = 0; + break; + case MTRR_TYPE_WRCOMB: + arr_type = 8; + break; + case MTRR_TYPE_WRTHROUGH: + arr_type = 25; + break; + default: + arr_type = 9; + break; + } + } + + prepare_set(); + + base <<= PAGE_SHIFT; + setCx86(arr, ((unsigned char *) &base)[3]); + setCx86(arr + 1, ((unsigned char *) &base)[2]); + setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); + setCx86(CX86_RCR_BASE + reg, arr_type); + + post_set(); +} + +typedef struct { + unsigned long base; + unsigned int size; + mtrr_type type; +} arr_state_t; + +static arr_state_t arr_state[8] __initdata = { + {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, + {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL} +}; + +static unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 }; + +static void cyrix_set_all(void) +{ + int i; + + prepare_set(); + + /* the CCRs are not contiguous */ + for (i = 0; i < 4; i++) + setCx86(CX86_CCR0 + i, ccr_state[i]); + for (; i < 7; i++) + setCx86(CX86_CCR4 + i, ccr_state[i]); + for (i = 0; i < 8; i++) + cyrix_set_arr(i, arr_state[i].base, + arr_state[i].size, arr_state[i].type); + + post_set(); +} + +#if 0 +/* + * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection + * with the SMM (System Management Mode) mode. So we need the following: + * Check whether SMI_LOCK (CCR3 bit 0) is set + * if it is set, write a warning message: ARR3 cannot be changed! 
+ * (it cannot be changed until the next processor reset) + * if it is reset, then we can change it, set all the needed bits: + * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset) + * - disable access to SMM memory (CCR1 bit 2 reset) + * - disable SMM mode (CCR1 bit 1 reset) + * - disable write protection of ARR3 (CCR6 bit 1 reset) + * - (maybe) disable ARR3 + * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set) + */ +static void __init +cyrix_arr_init(void) +{ + struct set_mtrr_context ctxt; + unsigned char ccr[7]; + int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 }; +#ifdef CONFIG_SMP + int i; +#endif + + /* flush cache and enable MAPEN */ + set_mtrr_prepare_save(&ctxt); + set_mtrr_cache_disable(&ctxt); + + /* Save all CCRs locally */ + ccr[0] = getCx86(CX86_CCR0); + ccr[1] = getCx86(CX86_CCR1); + ccr[2] = getCx86(CX86_CCR2); + ccr[3] = ctxt.ccr3; + ccr[4] = getCx86(CX86_CCR4); + ccr[5] = getCx86(CX86_CCR5); + ccr[6] = getCx86(CX86_CCR6); + + if (ccr[3] & 1) { + ccrc[3] = 1; + arr3_protected = 1; + } else { + /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and + * access to SMM memory through ARR3 (bit 7). + */ + if (ccr[1] & 0x80) { + ccr[1] &= 0x7f; + ccrc[1] |= 0x80; + } + if (ccr[1] & 0x04) { + ccr[1] &= 0xfb; + ccrc[1] |= 0x04; + } + if (ccr[1] & 0x02) { + ccr[1] &= 0xfd; + ccrc[1] |= 0x02; + } + arr3_protected = 0; + if (ccr[6] & 0x02) { + ccr[6] &= 0xfd; + ccrc[6] = 1; /* Disable write protection of ARR3 */ + setCx86(CX86_CCR6, ccr[6]); + } + /* Disable ARR3. This is safe now that we disabled SMM. */ + /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */ + } + /* If we changed CCR1 in memory, change it in the processor, too. */ + if (ccrc[1]) + setCx86(CX86_CCR1, ccr[1]); + + /* Enable ARR usage by the processor */ + if (!(ccr[5] & 0x20)) { + ccr[5] |= 0x20; + ccrc[5] = 1; + setCx86(CX86_CCR5, ccr[5]); + } +#ifdef CONFIG_SMP + for (i = 0; i < 7; i++) + ccr_state[i] = ccr[i]; + for (i = 0; i < 8; i++) + cyrix_get_arr(i, + &arr_state[i].base, &arr_state[i].size, + &arr_state[i].type); +#endif + + set_mtrr_done(&ctxt); /* flush cache and disable MAPEN */ + + if (ccrc[5]) + printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n"); + if (ccrc[3]) + printk(KERN_INFO "mtrr: ARR3 cannot be changed\n"); +/* + if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n"); + if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n"); + if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n"); +*/ + if (ccrc[6]) + printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n"); +} +#endif + +static struct mtrr_ops cyrix_mtrr_ops = { + .vendor = X86_VENDOR_CYRIX, +// .init = cyrix_arr_init, + .set_all = cyrix_set_all, + .set = cyrix_set_arr, + .get = cyrix_get_arr, + .get_free_region = cyrix_get_free_region, + .validate_add_page = generic_validate_add_page, + .have_wrcomb = positive_have_wrcomb, +}; + +int __init cyrix_init_mtrr(void) +{ + set_mtrr_ops(&cyrix_mtrr_ops); + return 0; +} + +//arch_initcall(cyrix_init_mtrr); diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c new file mode 100644 index 00000000000..a4cce454d09 --- /dev/null +++ b/arch/i386/kernel/cpu/mtrr/generic.c @@ -0,0 +1,417 @@ +/* This only handles 32bit MTRR on 32bit hosts. 
   This is strictly wrong because MTRRs can span up to 40 bits
   (36 bits on most modern x86) */
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+#include <asm/system.h>
+#include <asm/cpufeature.h>
+#include <asm/tlbflush.h>
+#include "mtrr.h"
+
+struct mtrr_state {
+	struct mtrr_var_range *var_ranges;
+	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
+	unsigned char enabled;
+	mtrr_type def_type;
+};
+
+static unsigned long smp_changes_mask;
+static struct mtrr_state mtrr_state = {};
+
+/*  Get the MSR pair relating to a var range  */
+static void __init
+get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
+{
+	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
+	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
+}
+
+static void __init
+get_fixed_ranges(mtrr_type * frs)
+{
+	unsigned int *p = (unsigned int *) frs;
+	int i;
+
+	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
+
+	for (i = 0; i < 2; i++)
+		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
+	for (i = 0; i < 8; i++)
+		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
+}
+
+/*  Grab all of the MTRR state for this CPU into *state  */
+void __init get_mtrr_state(void)
+{
+	unsigned int i;
+	struct mtrr_var_range *vrs;
+	unsigned lo, dummy;
+
+	if (!mtrr_state.var_ranges) {
+		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
+						GFP_KERNEL);
+		if (!mtrr_state.var_ranges)
+			return;
+	}
+	vrs = mtrr_state.var_ranges;
+
+	for (i = 0; i < num_var_ranges; i++)
+		get_mtrr_var_range(i, &vrs[i]);
+	get_fixed_ranges(mtrr_state.fixed_ranges);
+
+	rdmsr(MTRRdefType_MSR, lo, dummy);
+	mtrr_state.def_type = (lo & 0xff);
+	mtrr_state.enabled = (lo & 0xc00) >> 10;
+}
+
+/*  Free resources associated with a struct mtrr_state  */
+void __init finalize_mtrr_state(void)
+{
+	if (mtrr_state.var_ranges)
+		kfree(mtrr_state.var_ranges);
+	mtrr_state.var_ranges = NULL;
+}
+
+/*  Some BIOSes are broken and don't set all MTRRs the same!  */
+void __init mtrr_state_warn(void)
+{
+	unsigned long mask = smp_changes_mask;
+
+	if (!mask)
+		return;
+	if (mask & MTRR_CHANGE_MASK_FIXED)
+		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+	if (mask & MTRR_CHANGE_MASK_VARIABLE)
+		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
+	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
+		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
+	printk(KERN_INFO "mtrr: corrected configuration.\n");
+}
+
+/* Doesn't attempt to pass an error out to MTRR users
+   because it's quite complicated in some cases and probably not
+   worth it because the best error handling is to ignore it. */
+void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
+{
+	if (wrmsr_safe(msr, a, b) < 0)
+		printk(KERN_ERR
+			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
+			smp_processor_id(), msr, a, b);
+}
+
+int generic_get_free_region(unsigned long base, unsigned long size)
+/*  [SUMMARY] Get a free MTRR.
+    <base> The starting (base) address of the region.
+    <size> The size (in bytes) of the region.
+    [RETURNS] The index of the region on success, else -ENOSPC on error.
+*/ +{ + int i, max; + mtrr_type ltype; + unsigned long lbase; + unsigned lsize; + + max = num_var_ranges; + for (i = 0; i < max; ++i) { + mtrr_if->get(i, &lbase, &lsize, <ype); + if (lsize == 0) + return i; + } + return -ENOSPC; +} + +void generic_get_mtrr(unsigned int reg, unsigned long *base, + unsigned int *size, mtrr_type * type) +{ + unsigned int mask_lo, mask_hi, base_lo, base_hi; + + rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); + if ((mask_lo & 0x800) == 0) { + /* Invalid (i.e. free) range */ + *base = 0; + *size = 0; + *type = 0; + return; + } + + rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); + + /* Work out the shifted address mask. */ + mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) + | mask_lo >> PAGE_SHIFT; + + /* This works correctly if size is a power of two, i.e. a + contiguous range. */ + *size = -mask_lo; + *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; + *type = base_lo & 0xff; +} + +static int set_fixed_ranges(mtrr_type * frs) +{ + unsigned int *p = (unsigned int *) frs; + int changed = FALSE; + int i; + unsigned int lo, hi; + + rdmsr(MTRRfix64K_00000_MSR, lo, hi); + if (p[0] != lo || p[1] != hi) { + mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]); + changed = TRUE; + } + + for (i = 0; i < 2; i++) { + rdmsr(MTRRfix16K_80000_MSR + i, lo, hi); + if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) { + mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], + p[3 + i * 2]); + changed = TRUE; + } + } + + for (i = 0; i < 8; i++) { + rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi); + if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) { + mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], + p[7 + i * 2]); + changed = TRUE; + } + } + return changed; +} + +/* Set the MSR pair relating to a var range. Returns TRUE if + changes are made */ +static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) +{ + unsigned int lo, hi; + int changed = FALSE; + + rdmsr(MTRRphysBase_MSR(index), lo, hi); + if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) + || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) { + mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); + changed = TRUE; + } + + rdmsr(MTRRphysMask_MSR(index), lo, hi); + + if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL) + || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) { + mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); + changed = TRUE; + } + return changed; +} + +static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi) +/* [SUMMARY] Set the MTRR state for this CPU. + The MTRR state information to read. + Some relevant CPU context. + [NOTE] The CPU must already be in a safe state for MTRR changes. + [RETURNS] 0 if no changes made, else a mask indication what was changed. 
+*/ +{ + unsigned int i; + unsigned long change_mask = 0; + + for (i = 0; i < num_var_ranges; i++) + if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) + change_mask |= MTRR_CHANGE_MASK_VARIABLE; + + if (set_fixed_ranges(mtrr_state.fixed_ranges)) + change_mask |= MTRR_CHANGE_MASK_FIXED; + + /* Set_mtrr_restore restores the old value of MTRRdefType, + so to set it we fiddle with the saved value */ + if ((deftype_lo & 0xff) != mtrr_state.def_type + || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { + deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10); + change_mask |= MTRR_CHANGE_MASK_DEFTYPE; + } + + return change_mask; +} + + +static unsigned long cr4 = 0; +static u32 deftype_lo, deftype_hi; +static DEFINE_SPINLOCK(set_atomicity_lock); + +/* + * Since we are disabling the cache don't allow any interrupts - they + * would run extremely slow and would only increase the pain. The caller must + * ensure that local interrupts are disabled and are reenabled after post_set() + * has been called. + */ + +static void prepare_set(void) +{ + unsigned long cr0; + + /* Note that this is not ideal, since the cache is only flushed/disabled + for this CPU while the MTRRs are changed, but changing this requires + more invasive changes to the way the kernel boots */ + + spin_lock(&set_atomicity_lock); + + /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ + cr0 = read_cr0() | 0x40000000; /* set CD flag */ + write_cr0(cr0); + wbinvd(); + + /* Save value of CR4 and clear Page Global Enable (bit 7) */ + if ( cpu_has_pge ) { + cr4 = read_cr4(); + write_cr4(cr4 & ~X86_CR4_PGE); + } + + /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ + __flush_tlb(); + + /* Save MTRR state */ + rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); + + /* Disable MTRRs, and set the default type to uncached */ + mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi); +} + +static void post_set(void) +{ + /* Flush TLBs (no need to flush caches - they are disabled) */ + __flush_tlb(); + + /* Intel (P6) standard MTRRs */ + mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); + + /* Enable caches */ + write_cr0(read_cr0() & 0xbfffffff); + + /* Restore value of CR4 */ + if ( cpu_has_pge ) + write_cr4(cr4); + spin_unlock(&set_atomicity_lock); +} + +static void generic_set_all(void) +{ + unsigned long mask, count; + unsigned long flags; + + local_irq_save(flags); + prepare_set(); + + /* Actually set the state */ + mask = set_mtrr_state(deftype_lo,deftype_hi); + + post_set(); + local_irq_restore(flags); + + /* Use the atomic bitops to update the global mask */ + for (count = 0; count < sizeof mask * 8; ++count) { + if (mask & 0x01) + set_bit(count, &smp_changes_mask); + mask >>= 1; + } + +} + +static void generic_set_mtrr(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type) +/* [SUMMARY] Set variable MTRR register on the local CPU. + The register to set. + The base address of the region. + The size of the region. If this is 0 the region is disabled. + The type of the region. + If TRUE, do the change safely. If FALSE, safety measures should + be done externally. + [RETURNS] Nothing. +*/ +{ + unsigned long flags; + + local_irq_save(flags); + prepare_set(); + + if (size == 0) { + /* The invalid bit is kept in the mask, so we simply clear the + relevant mask register to disable a range. 
	 */
+		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
+	} else {
+		mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
+		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
+		mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
+		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
+	}
+
+	post_set();
+	local_irq_restore(flags);
+}
+
+int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+{
+	unsigned long lbase, last;
+
+	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
+	    and not touch 0x70000000->0x7003FFFF  */
+	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
+	    boot_cpu_data.x86_model == 1 &&
+	    boot_cpu_data.x86_mask <= 7) {
+		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
+			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+			return -EINVAL;
+		}
+		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
+		    (type == MTRR_TYPE_WRCOMB
+		     || type == MTRR_TYPE_WRBACK)) {
+			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+			return -EINVAL;
+		}
+	}
+
+	if (base + size < 0x100) {
+		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
+		       base, size);
+		return -EINVAL;
+	}
+	/*  Check upper bits of base and last are equal and lower bits are 0
+	    for base and 1 for last  */
+	last = base + size - 1;
+	for (lbase = base; !(lbase & 1) && (last & 1);
+	     lbase = lbase >> 1, last = last >> 1) ;
+	if (lbase != last) {
+		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
+		       base, size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+static int generic_have_wrcomb(void)
+{
+	unsigned long config, dummy;
+	rdmsr(MTRRcap_MSR, config, dummy);
+	return (config & (1 << 10));
+}
+
+int positive_have_wrcomb(void)
+{
+	return 1;
+}
+
+/* generic structure...
+ */
+struct mtrr_ops generic_mtrr_ops = {
+	.use_intel_if      = 1,
+	.set_all	   = generic_set_all,
+	.get               = generic_get_mtrr,
+	.get_free_region   = generic_get_free_region,
+	.set               = generic_set_mtrr,
+	.validate_add_page = generic_validate_add_page,
+	.have_wrcomb       = generic_have_wrcomb,
+};
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
new file mode 100644
index 00000000000..1923e0aed26
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -0,0 +1,374 @@
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+
+#define LINE_SIZE 80
+
+#include <asm/mtrr.h>
+#include "mtrr.h"
+
+/* RED-PEN: this is accessed without any locking */
+extern unsigned int *usage_table;
+
+
+#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
+
+static char *mtrr_strings[MTRR_NUM_TYPES] =
+{
+	"uncachable",		/* 0 */
+	"write-combining",	/* 1 */
+	"?",			/* 2 */
+	"?",			/* 3 */
+	"write-through",	/* 4 */
+	"write-protect",	/* 5 */
+	"write-back",		/* 6 */
+};
+
+char *mtrr_attrib_to_str(int x)
+{
+	return (x <= 6) ?
mtrr_strings[x] : "?"; +} + +#ifdef CONFIG_PROC_FS + +static int +mtrr_file_add(unsigned long base, unsigned long size, + unsigned int type, char increment, struct file *file, int page) +{ + int reg, max; + unsigned int *fcount = FILE_FCOUNT(file); + + max = num_var_ranges; + if (fcount == NULL) { + fcount = kmalloc(max * sizeof *fcount, GFP_KERNEL); + if (!fcount) + return -ENOMEM; + memset(fcount, 0, max * sizeof *fcount); + FILE_FCOUNT(file) = fcount; + } + if (!page) { + if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) + return -EINVAL; + base >>= PAGE_SHIFT; + size >>= PAGE_SHIFT; + } + reg = mtrr_add_page(base, size, type, 1); + if (reg >= 0) + ++fcount[reg]; + return reg; +} + +static int +mtrr_file_del(unsigned long base, unsigned long size, + struct file *file, int page) +{ + int reg; + unsigned int *fcount = FILE_FCOUNT(file); + + if (!page) { + if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) + return -EINVAL; + base >>= PAGE_SHIFT; + size >>= PAGE_SHIFT; + } + reg = mtrr_del_page(-1, base, size); + if (reg < 0) + return reg; + if (fcount == NULL) + return reg; + if (fcount[reg] < 1) + return -EINVAL; + --fcount[reg]; + return reg; +} + +/* RED-PEN: seq_file can seek now. this is ignored. */ +static ssize_t +mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) +/* Format of control line: + "base=%Lx size=%Lx type=%s" OR: + "disable=%d" +*/ +{ + int i, err; + unsigned long reg; + unsigned long long base, size; + char *ptr; + char line[LINE_SIZE]; + size_t linelen; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!len) + return -EINVAL; + memset(line, 0, LINE_SIZE); + if (len > LINE_SIZE) + len = LINE_SIZE; + if (copy_from_user(line, buf, len - 1)) + return -EFAULT; + linelen = strlen(line); + ptr = line + linelen - 1; + if (linelen && *ptr == '\n') + *ptr = '\0'; + if (!strncmp(line, "disable=", 8)) { + reg = simple_strtoul(line + 8, &ptr, 0); + err = mtrr_del_page(reg, 0, 0); + if (err < 0) + return err; + return len; + } + if (strncmp(line, "base=", 5)) + return -EINVAL; + base = simple_strtoull(line + 5, &ptr, 0); + for (; isspace(*ptr); ++ptr) ; + if (strncmp(ptr, "size=", 5)) + return -EINVAL; + size = simple_strtoull(ptr + 5, &ptr, 0); + if ((base & 0xfff) || (size & 0xfff)) + return -EINVAL; + for (; isspace(*ptr); ++ptr) ; + if (strncmp(ptr, "type=", 5)) + return -EINVAL; + ptr += 5; + for (; isspace(*ptr); ++ptr) ; + for (i = 0; i < MTRR_NUM_TYPES; ++i) { + if (strcmp(ptr, mtrr_strings[i])) + continue; + base >>= PAGE_SHIFT; + size >>= PAGE_SHIFT; + err = + mtrr_add_page((unsigned long) base, (unsigned long) size, i, + 1); + if (err < 0) + return err; + return len; + } + return -EINVAL; +} + +static int +mtrr_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long __arg) +{ + int err; + mtrr_type type; + struct mtrr_sentry sentry; + struct mtrr_gentry gentry; + void __user *arg = (void __user *) __arg; + + switch (cmd) { + default: + return -ENOTTY; + case MTRRIOC_ADD_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = + mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, + file, 0); + if (err < 0) + return err; + break; + case MTRRIOC_SET_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); + if (err < 0) + return err; + break; + case MTRRIOC_DEL_ENTRY: + if (!capable(CAP_SYS_ADMIN)) 
+ return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = mtrr_file_del(sentry.base, sentry.size, file, 0); + if (err < 0) + return err; + break; + case MTRRIOC_KILL_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = mtrr_del(-1, sentry.base, sentry.size); + if (err < 0) + return err; + break; + case MTRRIOC_GET_ENTRY: + if (copy_from_user(&gentry, arg, sizeof gentry)) + return -EFAULT; + if (gentry.regnum >= num_var_ranges) + return -EINVAL; + mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); + + /* Hide entries that go above 4GB */ + if (gentry.base + gentry.size > 0x100000 + || gentry.size == 0x100000) + gentry.base = gentry.size = gentry.type = 0; + else { + gentry.base <<= PAGE_SHIFT; + gentry.size <<= PAGE_SHIFT; + gentry.type = type; + } + + if (copy_to_user(arg, &gentry, sizeof gentry)) + return -EFAULT; + break; + case MTRRIOC_ADD_PAGE_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = + mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, + file, 1); + if (err < 0) + return err; + break; + case MTRRIOC_SET_PAGE_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); + if (err < 0) + return err; + break; + case MTRRIOC_DEL_PAGE_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = mtrr_file_del(sentry.base, sentry.size, file, 1); + if (err < 0) + return err; + break; + case MTRRIOC_KILL_PAGE_ENTRY: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&sentry, arg, sizeof sentry)) + return -EFAULT; + err = mtrr_del_page(-1, sentry.base, sentry.size); + if (err < 0) + return err; + break; + case MTRRIOC_GET_PAGE_ENTRY: + if (copy_from_user(&gentry, arg, sizeof gentry)) + return -EFAULT; + if (gentry.regnum >= num_var_ranges) + return -EINVAL; + mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); + gentry.type = type; + + if (copy_to_user(arg, &gentry, sizeof gentry)) + return -EFAULT; + break; + } + return 0; +} + +static int +mtrr_close(struct inode *ino, struct file *file) +{ + int i, max; + unsigned int *fcount = FILE_FCOUNT(file); + + if (fcount != NULL) { + max = num_var_ranges; + for (i = 0; i < max; ++i) { + while (fcount[i] > 0) { + mtrr_del(i, 0, 0); + --fcount[i]; + } + } + kfree(fcount); + FILE_FCOUNT(file) = NULL; + } + return single_release(ino, file); +} + +static int mtrr_seq_show(struct seq_file *seq, void *offset); + +static int mtrr_open(struct inode *inode, struct file *file) +{ + if (!mtrr_if) + return -EIO; + if (!mtrr_if->get) + return -ENXIO; + return single_open(file, mtrr_seq_show, NULL); +} + +static struct file_operations mtrr_fops = { + .owner = THIS_MODULE, + .open = mtrr_open, + .read = seq_read, + .llseek = seq_lseek, + .write = mtrr_write, + .ioctl = mtrr_ioctl, + .release = mtrr_close, +}; + + +static struct proc_dir_entry *proc_root_mtrr; + + +static int mtrr_seq_show(struct seq_file *seq, void *offset) +{ + char factor; + int i, max, len; + mtrr_type type; + unsigned long base; + unsigned int size; + + len = 0; + max = num_var_ranges; + for (i = 0; i < max; i++) { + mtrr_if->get(i, &base, &size, &type); + if (size == 0) + usage_table[i] = 0; + else { + if (size < (0x100000 >> PAGE_SHIFT)) { + /* less 
than 1MB */ + factor = 'K'; + size <<= PAGE_SHIFT - 10; + } else { + factor = 'M'; + size >>= 20 - PAGE_SHIFT; + } + /* RED-PEN: base can be > 32bit */ + len += seq_printf(seq, + "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n", + i, base, base >> (20 - PAGE_SHIFT), size, factor, + mtrr_attrib_to_str(type), usage_table[i]); + } + } + return 0; +} + +static int __init mtrr_if_init(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if ((!cpu_has(c, X86_FEATURE_MTRR)) && + (!cpu_has(c, X86_FEATURE_K6_MTRR)) && + (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) && + (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) + return -ENODEV; + + proc_root_mtrr = + create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root); + if (proc_root_mtrr) { + proc_root_mtrr->owner = THIS_MODULE; + proc_root_mtrr->proc_fops = &mtrr_fops; + } + return 0; +} + +arch_initcall(mtrr_if_init); +#endif /* CONFIG_PROC_FS */ diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c new file mode 100644 index 00000000000..8f67b490a7f --- /dev/null +++ b/arch/i386/kernel/cpu/mtrr/main.c @@ -0,0 +1,693 @@ +/* Generic MTRR (Memory Type Range Register) driver. + + Copyright (C) 1997-2000 Richard Gooch + Copyright (c) 2002 Patrick Mochel + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + Richard Gooch may be reached by email at rgooch@atnf.csiro.au + The postal address is: + Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. + + Source: "Pentium Pro Family Developer's Manual, Volume 3: + Operating System Writer's Guide" (Intel document number 242692), + section 11.11.7 + + This was cleaned and made readable by Patrick Mochel + on 6-7 March 2002. + Source: Intel Architecture Software Developers Manual, Volume 3: + System Programming Guide; Section 9.11. (1997 edition - PPro). 
+*/ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include "mtrr.h" + +#define MTRR_VERSION "2.0 (20020519)" + +u32 num_var_ranges = 0; + +unsigned int *usage_table; +static DECLARE_MUTEX(main_lock); + +u32 size_or_mask, size_and_mask; + +static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; + +struct mtrr_ops * mtrr_if = NULL; + +static void set_mtrr(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type); + +extern int arr3_protected; + +void set_mtrr_ops(struct mtrr_ops * ops) +{ + if (ops->vendor && ops->vendor < X86_VENDOR_NUM) + mtrr_ops[ops->vendor] = ops; +} + +/* Returns non-zero if we have the write-combining memory type */ +static int have_wrcomb(void) +{ + struct pci_dev *dev; + + if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { + /* ServerWorks LE chipsets have problems with write-combining + Don't allow it and leave room for other chipsets to be tagged */ + if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && + dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { + printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n"); + pci_dev_put(dev); + return 0; + } + /* Intel 450NX errata # 23. Non ascending cachline evictions to + write combining memory may resulting in data corruption */ + if (dev->vendor == PCI_VENDOR_ID_INTEL && + dev->device == PCI_DEVICE_ID_INTEL_82451NX) { + printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); + pci_dev_put(dev); + return 0; + } + pci_dev_put(dev); + } + return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); +} + +/* This function returns the number of variable MTRRs */ +static void __init set_num_var_ranges(void) +{ + unsigned long config = 0, dummy; + + if (use_intel()) { + rdmsr(MTRRcap_MSR, config, dummy); + } else if (is_cpu(AMD)) + config = 2; + else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) + config = 8; + num_var_ranges = config & 0xff; +} + +static void __init init_table(void) +{ + int i, max; + + max = num_var_ranges; + if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL)) + == NULL) { + printk(KERN_ERR "mtrr: could not allocate\n"); + return; + } + for (i = 0; i < max; i++) + usage_table[i] = 1; +} + +struct set_mtrr_data { + atomic_t count; + atomic_t gate; + unsigned long smp_base; + unsigned long smp_size; + unsigned int smp_reg; + mtrr_type smp_type; +}; + +#ifdef CONFIG_SMP + +static void ipi_handler(void *info) +/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. + [RETURNS] Nothing. +*/ +{ + struct set_mtrr_data *data = info; + unsigned long flags; + + local_irq_save(flags); + + atomic_dec(&data->count); + while(!atomic_read(&data->gate)) + cpu_relax(); + + /* The master has cleared me to execute */ + if (data->smp_reg != ~0U) + mtrr_if->set(data->smp_reg, data->smp_base, + data->smp_size, data->smp_type); + else + mtrr_if->set_all(); + + atomic_dec(&data->count); + while(atomic_read(&data->gate)) + cpu_relax(); + + atomic_dec(&data->count); + local_irq_restore(flags); +} + +#endif + +/** + * set_mtrr - update mtrrs on all processors + * @reg: mtrr in question + * @base: mtrr base + * @size: mtrr size + * @type: mtrr type + * + * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: + * + * 1. Send IPI to do the following: + * 2. Disable Interrupts + * 3. Wait for all procs to do so + * 4. Enter no-fill cache mode + * 5. Flush caches + * 6. Clear PGE bit + * 7. Flush all TLBs + * 8. Disable all range registers + * 9. Update the MTRRs + * 10. 
Enable all range registers + * 11. Flush all TLBs and caches again + * 12. Enter normal cache mode and reenable caching + * 13. Set PGE + * 14. Wait for buddies to catch up + * 15. Enable interrupts. + * + * What does that mean for us? Well, first we set data.count to the number + * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait + * until it hits 0 and proceed. We set the data.gate flag and reset data.count. + * Meanwhile, they are waiting for that flag to be set. Once it's set, each + * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it + * differently, so we call mtrr_if->set() callback and let them take care of it. + * When they're done, they again decrement data->count and wait for data.gate to + * be reset. + * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. + * Everyone then enables interrupts and we all continue on. + * + * Note that the mechanism is the same for UP systems, too; all the SMP stuff + * becomes nops. + */ +static void set_mtrr(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type) +{ + struct set_mtrr_data data; + unsigned long flags; + + data.smp_reg = reg; + data.smp_base = base; + data.smp_size = size; + data.smp_type = type; + atomic_set(&data.count, num_booting_cpus() - 1); + atomic_set(&data.gate,0); + + /* Start the ball rolling on other CPUs */ + if (smp_call_function(ipi_handler, &data, 1, 0) != 0) + panic("mtrr: timed out waiting for other CPUs\n"); + + local_irq_save(flags); + + while(atomic_read(&data.count)) + cpu_relax(); + + /* ok, reset count and toggle gate */ + atomic_set(&data.count, num_booting_cpus() - 1); + atomic_set(&data.gate,1); + + /* do our MTRR business */ + + /* HACK! + * We use this same function to initialize the mtrrs on boot. + * The state of the boot cpu's mtrrs has been saved, and we want + * to replicate across all the APs. + * If we're doing that @reg is set to something special... + */ + if (reg != ~0U) + mtrr_if->set(reg,base,size,type); + + /* wait for the others */ + while(atomic_read(&data.count)) + cpu_relax(); + + atomic_set(&data.count, num_booting_cpus() - 1); + atomic_set(&data.gate,0); + + /* + * Wait here for everyone to have seen the gate change + * So we're the last ones to touch 'data' + */ + while(atomic_read(&data.count)) + cpu_relax(); + + local_irq_restore(flags); +} + +/** + * mtrr_add_page - Add a memory type region + * @base: Physical base address of region in pages (4 KB) + * @size: Physical size of region in pages (4 KB) + * @type: Type of MTRR desired + * @increment: If this is true do usage counting on the region + * + * Memory type region registers control the caching on newer Intel and + * non Intel processors. This function allows drivers to request an + * MTRR is added. The details and hardware specifics of each processor's + * implementation are hidden from the caller, but nevertheless the + * caller should expect to need to provide a power of two size on an + * equivalent power of two boundary. + * + * If the region cannot be added either because all regions are in use + * or the CPU cannot support it a negative value is returned. On success + * the register number for this entry is returned, but should be treated + * as a cookie only. + * + * On a multiprocessor machine the changes are made to all processors. + * This is required on x86 by the Intel processors. 
+ * + * The available types are + * + * %MTRR_TYPE_UNCACHABLE - No caching + * + * %MTRR_TYPE_WRBACK - Write data back in bursts whenever + * + * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts + * + * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes + * + * BUGS: Needs a quiet flag for the cases where drivers do not mind + * failures and do not wish system log messages to be sent. + */ + +int mtrr_add_page(unsigned long base, unsigned long size, + unsigned int type, char increment) +{ + int i; + mtrr_type ltype; + unsigned long lbase; + unsigned int lsize; + int error; + + if (!mtrr_if) + return -ENXIO; + + if ((error = mtrr_if->validate_add_page(base,size,type))) + return error; + + if (type >= MTRR_NUM_TYPES) { + printk(KERN_WARNING "mtrr: type: %u invalid\n", type); + return -EINVAL; + } + + /* If the type is WC, check that this processor supports it */ + if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { + printk(KERN_WARNING + "mtrr: your processor doesn't support write-combining\n"); + return -ENOSYS; + } + + if (base & size_or_mask || size & size_or_mask) { + printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); + return -EINVAL; + } + + error = -EINVAL; + + /* Search for existing MTRR */ + down(&main_lock); + for (i = 0; i < num_var_ranges; ++i) { + mtrr_if->get(i, &lbase, &lsize, <ype); + if (base >= lbase + lsize) + continue; + if ((base < lbase) && (base + size <= lbase)) + continue; + /* At this point we know there is some kind of overlap/enclosure */ + if ((base < lbase) || (base + size > lbase + lsize)) { + printk(KERN_WARNING + "mtrr: 0x%lx000,0x%lx000 overlaps existing" + " 0x%lx000,0x%x000\n", base, size, lbase, + lsize); + goto out; + } + /* New region is enclosed by an existing region */ + if (ltype != type) { + if (type == MTRR_TYPE_UNCACHABLE) + continue; + printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", + base, size, mtrr_attrib_to_str(ltype), + mtrr_attrib_to_str(type)); + goto out; + } + if (increment) + ++usage_table[i]; + error = i; + goto out; + } + /* Search for an empty MTRR */ + i = mtrr_if->get_free_region(base, size); + if (i >= 0) { + set_mtrr(i, base, size, type); + usage_table[i] = 1; + } else + printk(KERN_INFO "mtrr: no more MTRRs available\n"); + error = i; + out: + up(&main_lock); + return error; +} + +/** + * mtrr_add - Add a memory type region + * @base: Physical base address of region + * @size: Physical size of region + * @type: Type of MTRR desired + * @increment: If this is true do usage counting on the region + * + * Memory type region registers control the caching on newer Intel and + * non Intel processors. This function allows drivers to request an + * MTRR is added. The details and hardware specifics of each processor's + * implementation are hidden from the caller, but nevertheless the + * caller should expect to need to provide a power of two size on an + * equivalent power of two boundary. + * + * If the region cannot be added either because all regions are in use + * or the CPU cannot support it a negative value is returned. On success + * the register number for this entry is returned, but should be treated + * as a cookie only. + * + * On a multiprocessor machine the changes are made to all processors. + * This is required on x86 by the Intel processors. 
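+ *
+ * The byte-based equivalent of the mtrr_add_page() sketch above
+ * (again with a hypothetical framebuffer address; here base and
+ * size must be multiples of 4 kiB):
+ *
+ *	reg = mtrr_add(0xf8000000, 0x400000, MTRR_TYPE_WRCOMB, 1);
+ *	...
+ *	mtrr_del(reg, 0, 0);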
+/**
+ * mtrr_add - Add a memory type region
+ * @base: Physical base address of region
+ * @size: Physical size of region
+ * @type: Type of MTRR desired
+ * @increment: If this is true do usage counting on the region
+ *
+ * Memory type region registers control the caching on newer Intel and
+ * non-Intel processors. This function allows drivers to request that an
+ * MTRR be added. The details and hardware specifics of each processor's
+ * implementation are hidden from the caller, but nevertheless the
+ * caller should expect to need to provide a power of two size on an
+ * equivalent power of two boundary.
+ *
+ * If the region cannot be added, either because all regions are in use
+ * or because the CPU cannot support it, a negative value is returned.
+ * On success the register number for this entry is returned, but it
+ * should be treated as a cookie only.
+ *
+ * On a multiprocessor machine the changes are made to all processors.
+ * This is required on x86 by the Intel processors.
+ *
+ * The available types are
+ *
+ * %MTRR_TYPE_UNCACHABLE - No caching
+ *
+ * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
+ *
+ * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
+ *
+ * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
+ *
+ * BUGS: Needs a quiet flag for the cases where drivers do not mind
+ * failures and do not wish system log messages to be sent.
+ */
+
+int
+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
+	 char increment)
+{
+	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
+		printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+		return -EINVAL;
+	}
+	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
+			     increment);
+}
+
+/**
+ * mtrr_del_page - delete a memory type region
+ * @reg: Register returned by mtrr_add
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If a register is supplied, then base and size are ignored. This is
+ * how drivers should call it.
+ *
+ * Releases an MTRR region. If the usage count drops to zero the
+ * register is freed and the region returns to default state.
+ * On success the register number is returned; on failure, a negative
+ * error code.
+ */
+
+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
+{
+	int i, max;
+	mtrr_type ltype;
+	unsigned long lbase;
+	unsigned int lsize;
+	int error = -EINVAL;
+
+	if (!mtrr_if)
+		return -ENXIO;
+
+	max = num_var_ranges;
+	down(&main_lock);
+	if (reg < 0) {
+		/* Search for an existing MTRR */
+		for (i = 0; i < max; ++i) {
+			mtrr_if->get(i, &lbase, &lsize, &ltype);
+			if (lbase == base && lsize == size) {
+				reg = i;
+				break;
+			}
+		}
+		if (reg < 0) {
+			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n",
+			       base, size);
+			goto out;
+		}
+	}
+	if (reg >= max) {
+		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
+		goto out;
+	}
+	if (is_cpu(CYRIX) && !use_intel()) {
+		if ((reg == 3) && arr3_protected) {
+			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
+			goto out;
+		}
+	}
+	mtrr_if->get(reg, &lbase, &lsize, &ltype);
+	if (lsize < 1) {
+		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
+		goto out;
+	}
+	if (usage_table[reg] < 1) {
+		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+		goto out;
+	}
+	if (--usage_table[reg] < 1)
+		set_mtrr(reg, 0, 0, 0);
+	error = reg;
+ out:
+	up(&main_lock);
+	return error;
+}
+
+/**
+ * mtrr_del - delete a memory type region
+ * @reg: Register returned by mtrr_add
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If a register is supplied, then base and size are ignored. This is
+ * how drivers should call it.
+ *
+ * Releases an MTRR region. If the usage count drops to zero the
+ * register is freed and the region returns to default state.
+ * On success the register number is returned; on failure, a negative
+ * error code.
+ */
+
+int
+mtrr_del(int reg, unsigned long base, unsigned long size)
+{
+	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
+		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
+		printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+		return -EINVAL;
+	}
+	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
+}
+
+EXPORT_SYMBOL(mtrr_add);
+EXPORT_SYMBOL(mtrr_del);
+
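Drivers normally bracket the lifetime of such a mapping with the two exported calls, remembering the returned cookie. A sketch using the same illustrative addresses as above, this time through the byte-based interface (so base and size must be 4 kiB multiples):

	int mtrr_cookie;

	mtrr_cookie = mtrr_add(0xf8000000UL, 0x1000000UL, MTRR_TYPE_WRCOMB, 1);
	/* a negative cookie is not fatal: the device simply runs uncached */

	/* ... device lifetime ... */

	if (mtrr_cookie >= 0)
		mtrr_del(mtrr_cookie, 0xf8000000UL, 0x1000000UL);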
+/* HACK ALERT!
+ * These should be called implicitly, but we can't yet until all the initcall
+ * stuff is done...
+ */
+extern void amd_init_mtrr(void);
+extern void cyrix_init_mtrr(void);
+extern void centaur_init_mtrr(void);
+
+static void __init init_ifs(void)
+{
+	amd_init_mtrr();
+	cyrix_init_mtrr();
+	centaur_init_mtrr();
+}
+
+static void __init init_other_cpus(void)
+{
+	if (use_intel())
+		get_mtrr_state();
+
+	/* replicate the boot CPU's MTRR setup to the other processors */
+	set_mtrr(~0U, 0, 0, 0);
+
+	if (use_intel()) {
+		finalize_mtrr_state();
+		mtrr_state_warn();
+	}
+}
+
+
+struct mtrr_value {
+	mtrr_type	ltype;
+	unsigned long	lbase;
+	unsigned int	lsize;
+};
+
+static struct mtrr_value *mtrr_state;
+
+static int mtrr_save(struct sys_device *sysdev, u32 state)
+{
+	int i;
+	int size = num_var_ranges * sizeof(struct mtrr_value);
+
+	/* Snapshot every variable range so it can be replayed on resume */
+	mtrr_state = kmalloc(size, GFP_ATOMIC);
+	if (!mtrr_state)
+		return -ENOMEM;
+	memset(mtrr_state, 0, size);
+
+	for (i = 0; i < num_var_ranges; i++) {
+		mtrr_if->get(i,
+			     &mtrr_state[i].lbase,
+			     &mtrr_state[i].lsize,
+			     &mtrr_state[i].ltype);
+	}
+	return 0;
+}
+
+static int mtrr_restore(struct sys_device *sysdev)
+{
+	int i;
+
+	/* Replay only the ranges that were in use at suspend time */
+	for (i = 0; i < num_var_ranges; i++) {
+		if (mtrr_state[i].lsize)
+			set_mtrr(i,
+				 mtrr_state[i].lbase,
+				 mtrr_state[i].lsize,
+				 mtrr_state[i].ltype);
+	}
+	kfree(mtrr_state);
+	return 0;
+}
+
+
+static struct sysdev_driver mtrr_sysdev_driver = {
+	.suspend	= mtrr_save,
+	.resume		= mtrr_restore,
+};
+
+/**
+ * mtrr_init - initialize MTRRs on the boot CPU
+ *
+ * This needs to be called early; before any of the other CPUs are
+ * initialized (i.e. before smp_init()).
+ */
+static int __init mtrr_init(void)
+{
+	init_ifs();
+
+	if (cpu_has_mtrr) {
+		mtrr_if = &generic_mtrr_ops;
+		size_or_mask = 0xff000000;	/* 36 bits */
+		size_and_mask = 0x00f00000;
+
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_AMD:
+			/* The original Athlon docs said that
+			   total addressable memory is 44 bits wide.
+			   It was not really clear whether its MTRRs
+			   follow this or not. (Read: 44 or 36 bits).
+			   However, "x86-64_overview.pdf" explicitly
+			   states that "previous implementations support
+			   36 bit MTRRs" and also provides a way to
+			   query the width (in bits) of the physical
+			   addressable memory on the Hammer family.
+			 */
+			if (boot_cpu_data.x86 == 15
+			    && (cpuid_eax(0x80000000) >= 0x80000008)) {
+				u32 phys_addr;
+				phys_addr = cpuid_eax(0x80000008) & 0xff;
+				size_or_mask =
+				    ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
+				size_and_mask = ~size_or_mask & 0xfff00000;
+			}
+			/* Athlon MTRRs use an Intel-compatible interface for
+			 * getting and setting */
+			break;
+		case X86_VENDOR_CENTAUR:
+			if (boot_cpu_data.x86 == 6) {
+				/* The VIA Cyrix family has Intel-style MTRRs,
+				   but doesn't support PAE */
+				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_and_mask = 0;
+			}
+			break;
+
+		default:
+			break;
+		}
+	} else {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_AMD:
+			if (cpu_has_k6_mtrr) {
+				/* Pre-Athlon (K6) AMD CPU MTRRs */
+				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
+				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_and_mask = 0;
+			}
+			break;
+		case X86_VENDOR_CENTAUR:
+			if (cpu_has_centaur_mcr) {
+				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
+				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_and_mask = 0;
+			}
+			break;
+		case X86_VENDOR_CYRIX:
+			if (cpu_has_cyrix_arr) {
+				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
+				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_and_mask = 0;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	printk(KERN_INFO "mtrr: v%s\n", MTRR_VERSION);
+
+	if (mtrr_if) {
+		set_num_var_ranges();
+		init_table();
+		init_other_cpus();
+
+		return sysdev_driver_register(&cpu_sysdev_class,
+					      &mtrr_sysdev_driver);
+	}
+	return -ENXIO;
+}
+
+subsys_initcall(mtrr_init);
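To make the mask arithmetic in mtrr_init() concrete, a worked example for a hypothetical Hammer-family CPU whose CPUID leaf 0x80000008 reports a 40-bit physical address width (PAGE_SHIFT is 12 on i386):

	/* phys_addr = 40 */
	size_or_mask  = ~((1 << (40 - 12)) - 1);	/* == 0xf0000000 */
	size_and_mask = ~size_or_mask & 0xfff00000;	/* == 0x0ff00000 */

	/* size_or_mask flags page-frame bits that exceed the 40-bit width
	   (used by the range checks in mtrr_add_page()); size_and_mask
	   selects the mask bits that remain significant. */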
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
new file mode 100644
index 00000000000..de135124559
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h
@@ -0,0 +1,98 @@
+/*
+ * local mtrr defines.
+ */
+
+#ifndef TRUE
+#define TRUE  1
+#define FALSE 0
+#endif
+
+#define MTRRcap_MSR     0x0fe
+#define MTRRdefType_MSR 0x2ff
+
+#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
+#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+
+#define NUM_FIXED_RANGES 88
+#define MTRRfix64K_00000_MSR 0x250
+#define MTRRfix16K_80000_MSR 0x258
+#define MTRRfix16K_A0000_MSR 0x259
+#define MTRRfix4K_C0000_MSR 0x268
+#define MTRRfix4K_C8000_MSR 0x269
+#define MTRRfix4K_D0000_MSR 0x26a
+#define MTRRfix4K_D8000_MSR 0x26b
+#define MTRRfix4K_E0000_MSR 0x26c
+#define MTRRfix4K_E8000_MSR 0x26d
+#define MTRRfix4K_F0000_MSR 0x26e
+#define MTRRfix4K_F8000_MSR 0x26f
+
+#define MTRR_CHANGE_MASK_FIXED     0x01
+#define MTRR_CHANGE_MASK_VARIABLE  0x02
+#define MTRR_CHANGE_MASK_DEFTYPE   0x04
+
+/* In the Intel processor's MTRR interface, the MTRR type is always held in
+   an 8 bit field: */
+typedef u8 mtrr_type;
+
+struct mtrr_ops {
+	u32	vendor;
+	u32	use_intel_if;
+//	void	(*init)(void);
+	void	(*set)(unsigned int reg, unsigned long base,
+		       unsigned long size, mtrr_type type);
+	void	(*set_all)(void);
+
+	void	(*get)(unsigned int reg, unsigned long *base,
+		       unsigned int *size, mtrr_type *type);
+	int	(*get_free_region)(unsigned long base, unsigned long size);
+
+	int	(*validate_add_page)(unsigned long base, unsigned long size,
+				     unsigned int type);
+	int	(*have_wrcomb)(void);
+};
+
+extern int generic_get_free_region(unsigned long base, unsigned long size);
+extern int generic_validate_add_page(unsigned long base, unsigned long size,
+				     unsigned int type);
+
+extern struct mtrr_ops generic_mtrr_ops;
+
+extern int positive_have_wrcomb(void);
+
+/* library functions for processor-specific routines */
+struct set_mtrr_context {
+	unsigned long flags;
+	unsigned long deftype_lo;
+	unsigned long deftype_hi;
+	unsigned long cr4val;
+	unsigned long ccr3;
+};
+
+struct mtrr_var_range {
+	unsigned long base_lo;
+	unsigned long base_hi;
+	unsigned long mask_lo;
+	unsigned long mask_hi;
+};
+
+void set_mtrr_done(struct set_mtrr_context *ctxt);
+void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
+void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
+
+void get_mtrr_state(void);
+
+extern void set_mtrr_ops(struct mtrr_ops *ops);
+
+extern u32 size_or_mask, size_and_mask;
+extern struct mtrr_ops *mtrr_if;
+
+#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
+#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
+
+extern unsigned int num_var_ranges;
+
+void finalize_mtrr_state(void);
+void mtrr_state_warn(void);
+char *mtrr_attrib_to_str(int x);
+void mtrr_wrmsr(unsigned, unsigned, unsigned);
+
diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c
new file mode 100644
index 00000000000..f62ecd15811
--- /dev/null
+++ b/arch/i386/kernel/cpu/mtrr/state.c
@@ -0,0 +1,78 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "mtrr.h"
+
+
+/* Put the processor into a state where MTRRs can be safely set */
+void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
+{
+	unsigned int cr0;
+
+	/* Disable interrupts locally */
+	local_irq_save(ctxt->flags);
+
+	if (use_intel() || is_cpu(CYRIX)) {
+
+		/* Save value of CR4 and clear Page Global Enable (bit 7) */
+		if (cpu_has_pge) {
+			ctxt->cr4val = read_cr4();
+			write_cr4(ctxt->cr4val & (unsigned char) ~(1 << 7));
+		}
+
+		/* Disable and flush caches. Note that wbinvd flushes the TLBs
+		   as a side-effect */
+		cr0 = read_cr0() | 0x40000000;
+		wbinvd();
+		write_cr0(cr0);
+		wbinvd();
+
+		if (use_intel())
+			/* Save MTRR state */
+			rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
+		else
+			/* Cyrix ARRs - everything else was excluded at the top */
+			ctxt->ccr3 = getCx86(CX86_CCR3);
+	}
+}
+
+void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
+{
+	if (use_intel())
+		/* Disable MTRRs, and set the default type to uncached */
+		mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
+			   ctxt->deftype_hi);
+	else if (is_cpu(CYRIX))
+		/* Cyrix ARRs - everything else was excluded at the top */
+		setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
+}
+
+/* Restore the processor after a set_mtrr_prepare */
+void set_mtrr_done(struct set_mtrr_context *ctxt)
+{
+	if (use_intel() || is_cpu(CYRIX)) {
+
+		/* Flush caches and TLBs */
+		wbinvd();
+
+		/* Restore MTRRdefType */
+		if (use_intel())
+			/* Intel (P6) standard MTRRs */
+			mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo,
+				   ctxt->deftype_hi);
+		else
+			/* Cyrix ARRs - everything else was excluded at the top */
+			setCx86(CX86_CCR3, ctxt->ccr3);
+
+		/* Enable caches */
+		write_cr0(read_cr0() & 0xbfffffff);
+
+		/* Restore value of CR4 */
+		if (cpu_has_pge)
+			write_cr4(ctxt->cr4val);
+	}
+	/* Re-enable interrupts locally (if enabled previously) */
+	local_irq_restore(ctxt->flags);
+}
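The three helpers above are meant to be used as a bracket around any vendor-specific range-register update. A minimal sketch of the intended calling sequence — example_set_one_mtrr() is a hypothetical caller for illustration, not part of this patch:

	#include "mtrr.h"

	static void example_set_one_mtrr(unsigned int reg, unsigned long base,
					 unsigned long size, mtrr_type type)
	{
		struct set_mtrr_context ctxt;

		set_mtrr_prepare_save(&ctxt);	/* irqs off, caches flushed, state saved */
		set_mtrr_cache_disable(&ctxt);	/* MTRRs (or Cyrix ARRs) switched off */

		/* ... program the vendor's range registers for reg/base/size/type ... */

		set_mtrr_done(&ctxt);		/* restore registers, caches, then irqs */
	}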