author		Jeff Garzik <jgarzik@pretzel.yyz.us>	2005-06-26 18:06:06 -0400
committer	Jeff Garzik <jgarzik@pobox.com>		2005-06-26 18:06:06 -0400
commit		aef7b83c92dd0b7e994805440655d1d64147287b (patch)
tree		981f373358c1988e061625e8f272013065cb086f /arch/x86_64/mm/fault.c
parent		b1fc5505e0dbcc3fd7c75bfe6bee39ec50080963 (diff)
parent		8678887e7fb43cd6c9be6c9807b05e77848e0920 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'arch/x86_64/mm/fault.c')
-rw-r--r--	arch/x86_64/mm/fault.c	17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index e0330921676..2f187986f94 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -74,7 +74,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 	instr = (unsigned char *)convert_rip_to_linear(current, regs);
 	max_instr = instr + 15;
 
-	if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE)
+	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
 		return 0;
 
 	while (scan_more && instr < max_instr) {
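
Annotation: the hunk above folds the open-coded CS test into user_mode(). On x86-64 of this vintage the helper reduces to checking the requested-privilege-level bits of the saved CS selector. A minimal user-space sketch of that predicate (struct pt_regs is kernel-internal, so a bare selector value stands in for regs->cs; the 0x10/0x33 values are the era's kernel and user code segment selectors):

#include <stdio.h>

/* Sketch only, not the verbatim kernel definition.  Bits 0-1 of a
 * segment selector are the RPL: 0 = kernel, 3 = user.  This is the
 * same (regs->cs & 3) != 0 test the patch replaces with user_mode(). */
static int is_user_cs(unsigned long cs_selector)
{
	return (cs_selector & 3) != 0;
}

int main(void)
{
	printf("kernel CS (0x10): %d\n", is_user_cs(0x10));	/* prints 0 */
	printf("user CS   (0x33): %d\n", is_user_cs(0x33));	/* prints 1 */
	return 0;
}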
@@ -106,7 +106,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 		/* Could check the LDT for lm, but for now it's good
 		   enough to assume that long mode only uses well known
 		   segments or kernel. */
-		scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
+		scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
		break;
 
	case 0x60:
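
Annotation: this hunk sits in is_prefetch()'s opcode scan, in the case handling bytes 0x40-0x4f. In 64-bit long mode those bytes are REX prefixes and the scan should continue; in 32-bit compatibility mode they decode as inc/dec and cannot precede a prefetch, hence the segment test. A rough sketch of that classification, with the kernel's segment check reduced to a hypothetical boolean parameter:

#include <stdbool.h>
#include <stdio.h>

/* Sketch: may an opcode byte in 0x40-0x4f still be part of a prefetch
 * instruction's prefix stream?  In long mode these bytes are REX
 * prefixes; elsewhere they are inc/dec and the scan must stop.
 * "in_long_mode" stands in for the kernel's segment test above. */
static bool keep_scanning_after_0x40(unsigned char opcode, bool in_long_mode)
{
	if ((opcode & 0xf0) != 0x40)
		return true;		/* not the case this sketch covers */
	return in_long_mode;		/* REX prefixes exist only in long mode */
}

int main(void)
{
	printf("%d\n", keep_scanning_after_0x40(0x48, true));	/* 1: REX.W */
	printf("%d\n", keep_scanning_after_0x40(0x48, false));	/* 0: dec %eax */
	return 0;
}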
@@ -234,6 +234,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 
 /*
  * Handle a fault on the vmalloc or module mapping area
+ *
+ * This assumes no large pages in there.
 */
 static int vmalloc_fault(unsigned long address)
 {
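
Annotation: vmalloc and module mappings are installed lazily; their PTEs live only in init_mm's reference page tables, and vmalloc_fault() syncs a process's tables on first touch by walking pgd -> pud -> pmd -> pte and comparing each level against the reference entry, which is why the new comment can assume no large pages. A toy model of the four-level index arithmetic such a walk performs (x86-64 geometry: 9 index bits per level above a 12-bit page offset; the address is illustrative only):

#include <stdio.h>

/* Toy model of the pgd/pud/pmd/pte index computation; the tables
 * themselves are omitted.  Level 3 = pgd (top), level 0 = pte. */
#define LEVEL_BITS	9
#define LEVEL_MASK	((1UL << LEVEL_BITS) - 1)
#define PAGE_SHIFT	12

static unsigned long index_at(unsigned long addr, int level)
{
	return (addr >> (PAGE_SHIFT + level * LEVEL_BITS)) & LEVEL_MASK;
}

int main(void)
{
	unsigned long addr = 0xffffff0000001000UL;	/* illustrative high kernel address */
	for (int level = 3; level >= 0; level--)
		printf("level %d index: %lu\n", level, index_at(addr, level));
	return 0;
}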
@@ -272,7 +274,10 @@ static int vmalloc_fault(unsigned long address)
 	if (!pte_present(*pte_ref))
 		return -1;
 	pte = pte_offset_kernel(pmd, address);
-	if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+	/* Don't use pte_page here, because the mappings can point
+	   outside mem_map, and the NUMA hash lookup cannot handle
+	   that. */
+	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 		BUG();
 	__flush_tlb_all();
 	return 0;
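
Annotation: pte_page() turns a PTE into its struct page via the pfn, and on this era's NUMA/DISCONTIGMEM configurations that lookup hashes the pfn to a node, which misbehaves for frames outside mem_map (ioremap-style mappings can land there). Comparing raw page-frame numbers, as the hunk does, never touches mem_map. A simplified illustration of pfn extraction and comparison (the mask below is a simplified stand-in for the x86-64 physical-frame field of a PTE):

#include <stdint.h>
#include <stdio.h>

/* Simplified model: a pte value packs a page-frame number above low
 * flag bits.  Two ptes map the same frame iff their pfns match, even
 * when the flag bits differ. */
#define PAGE_SHIFT	12
#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* frame bits, simplified */

static uint64_t pte_pfn_sketch(uint64_t pte)
{
	return (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t ref = 0x1234067ULL;	/* frame 0x1234, one set of flag bits */
	uint64_t cur = 0x1234063ULL;	/* same frame, different flag bits */
	printf("pfns match: %d\n",
	       pte_pfn_sketch(ref) == pte_pfn_sketch(cur));	/* prints 1 */
	return 0;
}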
@@ -345,8 +350,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 1) == 0.
	 */
-	if (unlikely(address >= TASK_SIZE)) {
-		if (!(error_code & 5)) {
+	if (unlikely(address >= TASK_SIZE64)) {
+		if (!(error_code & 5) &&
+		      ((address >= VMALLOC_START && address < VMALLOC_END) ||
+		      (address >= MODULES_VADDR && address < MODULES_END))) {
 			if (vmalloc_fault(address) < 0)
 				goto bad_area_nosemaphore;
 			return;
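
Annotation: the error_code tests rely on the architectural x86 page-fault error code: bit 0 set means protection violation (clear means not-present), bit 1 means write access, bit 2 means the fault came from user mode. So !(error_code & 5) selects kernel-mode not-present faults, the only kind a lazily synced vmalloc mapping can legitimately produce, and the new range checks confine the fixup to the vmalloc and module areas. A small stand-alone decoder for those bits (macro names here are illustrative, not necessarily the kernel's):

#include <stdio.h>

/* Architectural x86 page-fault error-code bits. */
#define PF_PROT		(1UL << 0)	/* protection fault (0 = not present) */
#define PF_WRITE	(1UL << 1)	/* write access (0 = read) */
#define PF_USER		(1UL << 2)	/* fault from user mode (0 = kernel) */

static void decode(unsigned long error_code)
{
	printf("error_code=%#lx: %s, %s, %s -> vmalloc_fault candidate: %d\n",
	       error_code,
	       (error_code & PF_PROT) ? "protection" : "not-present",
	       (error_code & PF_WRITE) ? "write" : "read",
	       (error_code & PF_USER) ? "user" : "kernel",
	       !(error_code & (PF_PROT | PF_USER)));
}

int main(void)
{
	decode(0x0);	/* kernel read of a not-present page: candidate */
	decode(0x5);	/* user protection fault: never a vmalloc fault */
	return 0;
}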