author     Jeff Garzik <jeff@garzik.org>  2006-09-26 13:13:19 -0400
committer  Jeff Garzik <jeff@garzik.org>  2006-09-26 13:13:19 -0400
commit     c226951b93f7cd7c3a10b17384535b617bd43fd0 (patch)
tree       07b8796a5c99fbbf587b8d0dbcbc173cfe5e381e /arch/x86_64
parent     b0df3bd1e553e901ec7297267611a5db88240b38 (diff)
parent     e8216dee838c09776680a6f1a2e54d81f3cdfa14 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig               |  4
-rw-r--r--  arch/x86_64/kernel/e820.c         | 48
-rw-r--r--  arch/x86_64/kernel/setup.c        |  1
-rw-r--r--  arch/x86_64/kernel/smpboot.c      |  3
-rw-r--r--  arch/x86_64/kernel/suspend_asm.S  |  2
-rw-r--r--  arch/x86_64/kernel/time.c         | 37
-rw-r--r--  arch/x86_64/mm/fault.c            |  6
-rw-r--r--  arch/x86_64/mm/init.c             |  2
8 files changed, 91 insertions, 12 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 6cd4878625f..581ce9af0ec 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -24,6 +24,10 @@ config X86
bool
default y
+config ZONE_DMA32
+ bool
+ default y
+
config LOCKDEP_SUPPORT
bool
default y
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index d6d7f731f6f..708a3cd9a27 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
+#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -297,6 +298,53 @@ void __init e820_reserve_resources(void)
}
}
+/* Mark pages corresponding to given address range as nosave */
+static void __init
+e820_mark_nosave_range(unsigned long start, unsigned long end)
+{
+ unsigned long pfn, max_pfn;
+
+ if (start >= end)
+ return;
+
+ printk("Nosave address range: %016lx - %016lx\n", start, end);
+ max_pfn = end >> PAGE_SHIFT;
+ for (pfn = start >> PAGE_SHIFT; pfn < max_pfn; pfn++)
+ if (pfn_valid(pfn))
+ SetPageNosave(pfn_to_page(pfn));
+}
+
+/*
+ * Find the ranges of physical addresses that do not correspond to
+ * e820 RAM areas and mark the corresponding pages as nosave for software
+ * suspend and suspend to RAM.
+ *
+ * This function requires the e820 map to be sorted and without any
+ * overlapping entries and assumes the first e820 area to be RAM.
+ */
+void __init e820_mark_nosave_regions(void)
+{
+ int i;
+ unsigned long paddr;
+
+ paddr = round_down(e820.map[0].addr + e820.map[0].size, PAGE_SIZE);
+ for (i = 1; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (paddr < ei->addr)
+ e820_mark_nosave_range(paddr,
+ round_up(ei->addr, PAGE_SIZE));
+
+ paddr = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (ei->type != E820_RAM)
+ e820_mark_nosave_range(round_up(ei->addr, PAGE_SIZE),
+ paddr);
+
+ if (paddr >= (end_pfn << PAGE_SHIFT))
+ break;
+ }
+}
+
/*
* Add a memory region to the kernel e820 map.
*/
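An editorial aside on the new e820_mark_nosave_regions(): it makes a single pass over the sorted, non-overlapping e820 map and marks two kinds of ranges as nosave, namely the page-aligned holes between consecutive entries and the page-aligned interior of any non-RAM entry. A minimal userspace sketch of the same scan (toy memory map, made-up struct, and a printf standing in for SetPageNosave()) might look like this:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define ROUND_UP(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ROUND_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

/* Toy stand-in for an e820 entry: sorted, non-overlapping, first one RAM. */
struct area { unsigned long addr, size; int ram; };

/* Stand-in for SetPageNosave() on every valid pfn in [start, end). */
static void mark_nosave(unsigned long start, unsigned long end)
{
	if (start < end)
		printf("nosave: %016lx - %016lx\n", start, end);
}

int main(void)
{
	struct area map[] = {
		{ 0x0000000000000000UL, 0x000000000009f000UL, 1 },
		{ 0x000000000009f000UL, 0x0000000000001000UL, 0 },
		{ 0x0000000000100000UL, 0x000000003fef0000UL, 1 },
	};
	int n = sizeof(map) / sizeof(map[0]);
	unsigned long paddr = ROUND_DOWN(map[0].addr + map[0].size);

	for (int i = 1; i < n; i++) {
		/* Hole between the previous area and this one. */
		if (paddr < map[i].addr)
			mark_nosave(paddr, ROUND_UP(map[i].addr));

		paddr = ROUND_DOWN(map[i].addr + map[i].size);
		/* Non-RAM areas (reserved, ACPI, ...) are not saved either. */
		if (!map[i].ram)
			mark_nosave(ROUND_UP(map[i].addr), paddr);
	}
	return 0;
}

For this toy map the program prints the reserved hole just below 0xa0000 and the gap up to 1 MB, i.e. the pages swsusp must not try to save or restore. The kernel version additionally stops the walk once it passes end_pfn.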
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 34afad70482..4b39f0da17f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -689,6 +689,7 @@ void __init setup_arch(char **cmdline_p)
*/
probe_roms();
e820_reserve_resources();
+ e820_mark_nosave_regions();
request_resource(&iomem_resource, &video_ram_resource);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 975380207b4..3ae9ffddddc 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -46,9 +46,10 @@
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
-
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
+#include <linux/smp.h>
+
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
diff --git a/arch/x86_64/kernel/suspend_asm.S b/arch/x86_64/kernel/suspend_asm.S
index 320b6fb00cc..bfbe00763c6 100644
--- a/arch/x86_64/kernel/suspend_asm.S
+++ b/arch/x86_64/kernel/suspend_asm.S
@@ -54,7 +54,7 @@ ENTRY(restore_image)
movq %rcx, %cr3;
movq %rax, %cr4; # turn PGE back on
- movq pagedir_nosave(%rip), %rdx
+ movq restore_pblist(%rip), %rdx
loop:
testq %rdx, %rdx
jz done
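For readers who do not speak AT&T assembly: the renamed restore_pblist is the head of a NULL-terminated list of page backup entries, and the loop following the movq copies each saved page back to its original location. A rough C rendering of that loop, with an illustrative struct in place of the kernel's struct pbe from include/linux/suspend.h, is:

#include <string.h>

#define PAGE_SIZE 4096

/* Illustrative layout only; the real struct pbe differs in detail. */
struct pbe_example {
	void *address;             /* where the saved copy of the page lives */
	void *orig_address;        /* where the page has to be copied back to */
	struct pbe_example *next;  /* next entry; NULL terminates the list */
};

/* C rendering of the restore_image loop: walk the list, copy each page. */
static void restore_image_sketch(struct pbe_example *restore_pblist)
{
	for (struct pbe_example *p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}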
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 7a9b1822418..7700e6cd2bd 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -1148,23 +1148,25 @@ int hpet_rtc_timer_init(void)
hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
local_irq_save(flags);
+
cnt = hpet_readl(HPET_COUNTER);
cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
hpet_writel(cnt, HPET_T1_CMP);
hpet_t1_cmp = cnt;
- local_irq_restore(flags);
cfg = hpet_readl(HPET_T1_CFG);
cfg &= ~HPET_TN_PERIODIC;
cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
hpet_writel(cfg, HPET_T1_CFG);
+ local_irq_restore(flags);
+
return 1;
}
static void hpet_rtc_timer_reinit(void)
{
- unsigned int cfg, cnt;
+ unsigned int cfg, cnt, ticks_per_int, lost_ints;
if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
cfg = hpet_readl(HPET_T1_CFG);
@@ -1179,10 +1181,33 @@ static void hpet_rtc_timer_reinit(void)
hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
/* It is more accurate to use the comparator value than current count.*/
- cnt = hpet_t1_cmp;
- cnt += hpet_tick*HZ/hpet_rtc_int_freq;
- hpet_writel(cnt, HPET_T1_CMP);
- hpet_t1_cmp = cnt;
+ ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq;
+ hpet_t1_cmp += ticks_per_int;
+ hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
+
+ /*
+ * If the interrupt handler was delayed too long, the write above tries
+ * to schedule the next interrupt in the past and the hardware would
+ * not interrupt until the counter had wrapped around.
+ * So we have to check that the comparator wasn't set to a past time.
+ */
+ cnt = hpet_readl(HPET_COUNTER);
+ if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) {
+ lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1;
+ /* Make sure that, even with the time needed to execute
+ * this code, the next scheduled interrupt has been moved
+ * back to the future: */
+ lost_ints++;
+
+ hpet_t1_cmp += lost_ints * ticks_per_int;
+ hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
+
+ if (PIE_on)
+ PIE_count += lost_ints;
+
+ printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+ hpet_rtc_int_freq);
+ }
}
/*
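The interesting part of the second time.c hunk is the recovery path. HPET_T1_CMP is compared against a free-running 32-bit counter, so "was the comparator programmed in the past?" has to be asked with a wraparound-safe signed difference, and the missed periods are then added back in one step rather than one interrupt at a time. A self-contained sketch of that arithmetic, with invented register values, might be:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ticks_per_int = 22903;   /* e.g. hpet_tick * HZ / int_freq */
	uint32_t t1_cmp = 4294960000u;    /* comparator programmed near the wrap */
	uint32_t counter = 60000u;        /* counter has already wrapped past it */

	/*
	 * The unsigned subtraction wraps, and interpreting the result as a
	 * signed 32-bit value gives the right answer as long as the two
	 * values are less than 2^31 ticks apart.
	 */
	if ((int32_t)(counter - t1_cmp) > 0) {
		uint32_t lost = (counter - t1_cmp) / ticks_per_int + 1;

		lost++;    /* margin for the time spent right here */
		t1_cmp += lost * ticks_per_int;
		printf("lost %u interrupts, new comparator %u\n", lost, t1_cmp);
	}
	return 0;
}

The extra lost++ mirrors the patch's safety margin: by the time the new comparator value is written, the counter has moved on again, so the next interrupt is deliberately scheduled one period further out.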
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index ac8ea66ccb9..4198798e146 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -299,7 +299,7 @@ static int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
else
- BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
/* Below here mismatches are bugs because these lower tables
are shared */
@@ -308,7 +308,7 @@ static int vmalloc_fault(unsigned long address)
pud_ref = pud_offset(pgd_ref, address);
if (pud_none(*pud_ref))
return -1;
- if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
+ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
BUG();
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
@@ -641,7 +641,7 @@ void vmalloc_sync_all(void)
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
else
- BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
}
spin_unlock(&pgd_lock);
set_bit(pgd_index(address), insync);
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index d14fb2dfbfc..52fd42c40c8 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -536,7 +536,7 @@ int memory_add_physaddr_to_nid(u64 start)
int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdat = NODE_DATA(nid);
- struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
+ struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
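A note on this final hunk: the old expression relied on ZONE_NORMAL happening to sit at MAX_NR_ZONES - 2, which is exactly the kind of assumption that breaks as zones such as ZONE_DMA32 (see the Kconfig hunk above) come and go with configuration. A tiny illustrative program, using a made-up enum rather than the real one from include/linux/mmzone.h, shows why indexing node_zones by name is the robust form:

#include <stdio.h>

/* Made-up enum, loosely modeled on include/linux/mmzone.h; the real
 * layout depends on the kernel configuration. */
enum zone_type_example {
	EX_ZONE_DMA,
	EX_ZONE_DMA32,     /* present only when CONFIG_ZONE_DMA32=y */
	EX_ZONE_NORMAL,
	EX_ZONE_HIGHMEM,   /* present only on 32-bit highmem configs */
	EX_MAX_NR_ZONES
};

int main(void)
{
	/* By name: always the normal zone, whatever else is configured in. */
	printf("ZONE_NORMAL      = %d\n", EX_ZONE_NORMAL);
	/* By arithmetic: matches here only because of the current layout. */
	printf("MAX_NR_ZONES - 2 = %d\n", EX_MAX_NR_ZONES - 2);
	return 0;
}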