author     David Woodhouse <dwmw2@shinybook.infradead.org>   2005-07-13 15:25:59 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>   2005-07-13 15:25:59 +0100
commit     30beab1491f0b96b2f23d3fb68af01fd921a16d8 (patch)
tree       c580bdc0846269fbb10feeda901ecec1a48ee2ef /arch/ppc64
parent     21af6c4f2aa5f63138871b4ddd77d7ebf2588c9d (diff)
parent     c32511e2718618f0b53479eb36e07439aa363a74 (diff)
Merge with /shiny/git/linux-2.6/.git
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/Kconfig                    |   2
-rw-r--r--  arch/ppc64/kernel/cputable.c          | 365
-rw-r--r--  arch/ppc64/kernel/head.S              |  10
-rw-r--r--  arch/ppc64/kernel/hvconsole.c         |  51
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c     |  94
-rw-r--r--  arch/ppc64/kernel/idle.c              | 283
-rw-r--r--  arch/ppc64/kernel/kprobes.c           |   2
-rw-r--r--  arch/ppc64/kernel/maple_setup.c       |   3
-rw-r--r--  arch/ppc64/kernel/misc.S              |   6
-rw-r--r--  arch/ppc64/kernel/of_device.c         |  15
-rw-r--r--  arch/ppc64/kernel/pSeries_setup.c     | 156
-rw-r--r--  arch/ppc64/kernel/pmac_setup.c        |   5
-rw-r--r--  arch/ppc64/kernel/setup.c             |   8
-rw-r--r--  arch/ppc64/kernel/sys_ppc32.c         |  54
-rw-r--r--  arch/ppc64/kernel/sysfs.c             |  14
-rw-r--r--  arch/ppc64/kernel/vdso32/vdso32.lds.S |   4
16 files changed, 536 insertions, 536 deletions
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index f804f25232a..fdd8afba715 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -429,6 +429,8 @@ config CMDLINE
endmenu
+source "net/Kconfig"
+
source "drivers/Kconfig"
source "fs/Kconfig"
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 1d162c7c59d..8d4c46f6f0b 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -49,160 +49,219 @@ extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
#endif
struct cpu_spec cpu_specs[] = {
- { /* Power3 */
- 0xffff0000, 0x00400000, "POWER3 (630)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_IABR | CPU_FTR_PMC8,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power3,
- COMMON_PPC64_FW
- },
- { /* Power3+ */
- 0xffff0000, 0x00410000, "POWER3 (630+)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_IABR | CPU_FTR_PMC8,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power3,
- COMMON_PPC64_FW
- },
- { /* Northstar */
- 0xffff0000, 0x00330000, "RS64-II (northstar)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power3,
- COMMON_PPC64_FW
- },
- { /* Pulsar */
- 0xffff0000, 0x00340000, "RS64-III (pulsar)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power3,
- COMMON_PPC64_FW
- },
- { /* I-star */
- 0xffff0000, 0x00360000, "RS64-III (icestar)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power3,
- COMMON_PPC64_FW
- },
- { /* S-star */
- 0xffff0000, 0x00370000, "RS64-IV (sstar)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power3,
- COMMON_PPC64_FW
- },
- { /* Power4 */
- 0xffff0000, 0x00350000, "POWER4 (gp)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power4,
- COMMON_PPC64_FW
- },
- { /* Power4+ */
- 0xffff0000, 0x00380000, "POWER4+ (gq)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power4,
- COMMON_PPC64_FW
- },
- { /* PPC970 */
- 0xffff0000, 0x00390000, "PPC970",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
- CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
- 128, 128,
- __setup_cpu_ppc970,
- COMMON_PPC64_FW
- },
- { /* PPC970FX */
- 0xffff0000, 0x003c0000, "PPC970FX",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
- CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
- COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
- 128, 128,
- __setup_cpu_ppc970,
- COMMON_PPC64_FW
- },
- { /* Power5 */
- 0xffff0000, 0x003a0000, "POWER5 (gr)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
- CPU_FTR_MMCRA_SIHV,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power4,
- COMMON_PPC64_FW
- },
- { /* Power5 */
- 0xffff0000, 0x003b0000, "POWER5 (gs)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
- CPU_FTR_MMCRA_SIHV,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power4,
- COMMON_PPC64_FW
- },
- { /* BE DD1.x */
- 0xffff0000, 0x00700000, "Broadband Engine",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
- CPU_FTR_SMT,
- COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
- 128, 128,
- __setup_cpu_be,
- COMMON_PPC64_FW
- },
- { /* default match */
- 0x00000000, 0x00000000, "POWER4 (compatible)",
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
- CPU_FTR_PPCAS_ARCH_V2,
- COMMON_USER_PPC64,
- 128, 128,
- __setup_cpu_power4,
- COMMON_PPC64_FW
- }
+ { /* Power3 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00400000,
+ .cpu_name = "POWER3 (630)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_PMC8,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power3,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Power3+ */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00410000,
+ .cpu_name = "POWER3 (630+)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_PMC8,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power3,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Northstar */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00330000,
+ .cpu_name = "RS64-II (northstar)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power3,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Pulsar */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00340000,
+ .cpu_name = "RS64-III (pulsar)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power3,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* I-star */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00360000,
+ .cpu_name = "RS64-III (icestar)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power3,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* S-star */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00370000,
+ .cpu_name = "RS64-IV (sstar)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power3,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Power4 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00350000,
+ .cpu_name = "POWER4 (gp)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power4,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Power4+ */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00380000,
+ .cpu_name = "POWER4+ (gq)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power4,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* PPC970 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00390000,
+ .cpu_name = "PPC970",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+ .cpu_user_features = COMMON_USER_PPC64 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_ppc970,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* PPC970FX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003c0000,
+ .cpu_name = "PPC970FX",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+ .cpu_user_features = COMMON_USER_PPC64 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_ppc970,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Power5 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003a0000,
+ .cpu_name = "POWER5 (gr)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+ CPU_FTR_MMCRA_SIHV,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power4,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* Power5 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003b0000,
+ .cpu_name = "POWER5 (gs)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+ CPU_FTR_MMCRA_SIHV,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power4,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* BE DD1.x */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00700000,
+ .cpu_name = "Broadband Engine",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_SMT,
+ .cpu_user_features = COMMON_USER_PPC64 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_be,
+ .firmware_features = COMMON_PPC64_FW,
+ },
+ { /* default match */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "POWER4 (compatible)",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_PPCAS_ARCH_V2,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power4,
+ .firmware_features = COMMON_PPC64_FW,
+ }
};
firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
- {FW_FEATURE_PFT, "hcall-pft"},
- {FW_FEATURE_TCE, "hcall-tce"},
- {FW_FEATURE_SPRG0, "hcall-sprg0"},
- {FW_FEATURE_DABR, "hcall-dabr"},
- {FW_FEATURE_COPY, "hcall-copy"},
- {FW_FEATURE_ASR, "hcall-asr"},
- {FW_FEATURE_DEBUG, "hcall-debug"},
- {FW_FEATURE_PERF, "hcall-perf"},
- {FW_FEATURE_DUMP, "hcall-dump"},
- {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
- {FW_FEATURE_MIGRATE, "hcall-migrate"},
- {FW_FEATURE_PERFMON, "hcall-perfmon"},
- {FW_FEATURE_CRQ, "hcall-crq"},
- {FW_FEATURE_VIO, "hcall-vio"},
- {FW_FEATURE_RDMA, "hcall-rdma"},
- {FW_FEATURE_LLAN, "hcall-lLAN"},
- {FW_FEATURE_BULK, "hcall-bulk"},
- {FW_FEATURE_XDABR, "hcall-xdabr"},
- {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
- {FW_FEATURE_SPLPAR, "hcall-splpar"},
+ {FW_FEATURE_PFT, "hcall-pft"},
+ {FW_FEATURE_TCE, "hcall-tce"},
+ {FW_FEATURE_SPRG0, "hcall-sprg0"},
+ {FW_FEATURE_DABR, "hcall-dabr"},
+ {FW_FEATURE_COPY, "hcall-copy"},
+ {FW_FEATURE_ASR, "hcall-asr"},
+ {FW_FEATURE_DEBUG, "hcall-debug"},
+ {FW_FEATURE_PERF, "hcall-perf"},
+ {FW_FEATURE_DUMP, "hcall-dump"},
+ {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
+ {FW_FEATURE_MIGRATE, "hcall-migrate"},
+ {FW_FEATURE_PERFMON, "hcall-perfmon"},
+ {FW_FEATURE_CRQ, "hcall-crq"},
+ {FW_FEATURE_VIO, "hcall-vio"},
+ {FW_FEATURE_RDMA, "hcall-rdma"},
+ {FW_FEATURE_LLAN, "hcall-lLAN"},
+ {FW_FEATURE_BULK, "hcall-bulk"},
+ {FW_FEATURE_XDABR, "hcall-xdabr"},
+ {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
+ {FW_FEATURE_SPLPAR, "hcall-splpar"},
};
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 675c2708588..93ebcac0d5a 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -308,6 +308,7 @@ exception_marker:
label##_pSeries: \
HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
+ RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
@@ -315,6 +316,7 @@ label##_pSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
+ RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common
@@ -324,6 +326,7 @@ label##_iSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
+ RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
lbz r10,PACAPROCENABLED(r13); \
cmpwi 0,r10,0; \
@@ -393,6 +396,7 @@ __start_interrupts:
_machine_check_pSeries:
HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
+ RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
. = 0x300
@@ -419,6 +423,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
data_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRG1,r13
+ RUNLATCH_ON(r13)
mfspr r13,SPRG3 /* get paca address into r13 */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
std r10,PACA_EXSLB+EX_R10(r13)
@@ -439,6 +444,7 @@ data_access_slb_pSeries:
instruction_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRG1,r13
+ RUNLATCH_ON(r13)
mfspr r13,SPRG3 /* get paca address into r13 */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
std r10,PACA_EXSLB+EX_R10(r13)
@@ -464,6 +470,7 @@ instruction_access_slb_pSeries:
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
+ RUNLATCH_ON(r9)
mr r9,r13
mfmsr r10
mfspr r13,SPRG3
@@ -707,11 +714,13 @@ fwnmi_data_area:
system_reset_fwnmi:
HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
+ RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
.globl machine_check_fwnmi
machine_check_fwnmi:
HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
+ RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
/*
@@ -848,6 +857,7 @@ unrecov_fer:
.align 7
.globl data_access_common
data_access_common:
+ RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
mfspr r10,DAR
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,DSISR
diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c
index c72fb8ffe97..138e128a388 100644
--- a/arch/ppc64/kernel/hvconsole.c
+++ b/arch/ppc64/kernel/hvconsole.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
-#include <asm/prom.h>
/**
* hvc_get_chars - retrieve characters from firmware for denoted vterm adatper
@@ -42,29 +41,14 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count)
unsigned long got;
if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
- (unsigned long *)buf, (unsigned long *)buf+1) == H_Success) {
- /*
- * Work around a HV bug where it gives us a null
- * after every \r. -- paulus
- */
- if (got > 0) {
- int i;
- for (i = 1; i < got; ++i) {
- if (buf[i] == 0 && buf[i-1] == '\r') {
- --got;
- if (i < got)
- memmove(&buf[i], &buf[i+1],
- got - i);
- }
- }
- }
+ (unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
return got;
- }
return 0;
}
EXPORT_SYMBOL(hvc_get_chars);
+
/**
* hvc_put_chars: send characters to firmware for denoted vterm adapter
* @vtermno: The vtermno or unit_address of the adapter from which the data
@@ -88,34 +72,3 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
}
EXPORT_SYMBOL(hvc_put_chars);
-
-/*
- * We hope/assume that the first vty found corresponds to the first console
- * device.
- */
-int hvc_find_vtys(void)
-{
- struct device_node *vty;
- int num_found = 0;
-
- for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
- vty = of_find_node_by_name(vty, "vty")) {
- uint32_t *vtermno;
-
- /* We have statically defined space for only a certain number of
- * console adapters. */
- if (num_found >= MAX_NR_HVC_CONSOLES)
- break;
-
- vtermno = (uint32_t *)get_property(vty, "reg", NULL);
- if (!vtermno)
- continue;
-
- if (device_is_compatible(vty, "hvterm1")) {
- hvc_instantiate(*vtermno, num_found);
- ++num_found;
- }
- }
-
- return num_found;
-}
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index b3f770f6d40..077c82fc9f3 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -834,6 +834,92 @@ static int __init iSeries_src_init(void)
late_initcall(iSeries_src_init);
+static inline void process_iSeries_events(void)
+{
+ asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
+}
+
+static void yield_shared_processor(void)
+{
+ unsigned long tb;
+
+ HvCall_setEnabledInterrupts(HvCall_MaskIPI |
+ HvCall_MaskLpEvent |
+ HvCall_MaskLpProd |
+ HvCall_MaskTimeout);
+
+ tb = get_tb();
+ /* Compute future tb value when yield should expire */
+ HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
+
+ /*
+ * The decrementer stops during the yield. Force a fake decrementer
+ * here and let the timer_interrupt code sort out the actual time.
+ */
+ get_paca()->lppaca.int_dword.fields.decr_int = 1;
+ process_iSeries_events();
+}
+
+static int iseries_shared_idle(void)
+{
+ while (1) {
+ while (!need_resched() && !hvlpevent_is_pending()) {
+ local_irq_disable();
+ ppc64_runlatch_off();
+
+ /* Recheck with irqs off */
+ if (!need_resched() && !hvlpevent_is_pending())
+ yield_shared_processor();
+
+ HMT_medium();
+ local_irq_enable();
+ }
+
+ ppc64_runlatch_on();
+
+ if (hvlpevent_is_pending())
+ process_iSeries_events();
+
+ schedule();
+ }
+
+ return 0;
+}
+
+static int iseries_dedicated_idle(void)
+{
+ long oldval;
+
+ while (1) {
+ oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+ if (!oldval) {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ while (!need_resched()) {
+ ppc64_runlatch_off();
+ HMT_low();
+
+ if (hvlpevent_is_pending()) {
+ HMT_medium();
+ ppc64_runlatch_on();
+ process_iSeries_events();
+ }
+ }
+
+ HMT_medium();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ } else {
+ set_need_resched();
+ }
+
+ ppc64_runlatch_on();
+ schedule();
+ }
+
+ return 0;
+}
+
#ifndef CONFIG_PCI
void __init iSeries_init_IRQ(void) { }
#endif
@@ -859,5 +945,13 @@ void __init iSeries_early_setup(void)
ppc_md.get_rtc_time = iSeries_get_rtc_time;
ppc_md.calibrate_decr = iSeries_calibrate_decr;
ppc_md.progress = iSeries_progress;
+
+ if (get_paca()->lppaca.shared_proc) {
+ ppc_md.idle_loop = iseries_shared_idle;
+ printk(KERN_INFO "Using shared processor idle loop\n");
+ } else {
+ ppc_md.idle_loop = iseries_dedicated_idle;
+ printk(KERN_INFO "Using dedicated idle loop\n");
+ }
}
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 08952c7e621..954395d4263 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -20,109 +20,18 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpu.h>
-#include <linux/module.h>
#include <linux/sysctl.h>
-#include <linux/smp.h>
#include <asm/system.h>
#include <asm/processor.h>
-#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/time.h>
-#include <asm/iSeries/HvCall.h>
-#include <asm/iSeries/ItLpQueue.h>
-#include <asm/plpar_wrappers.h>
#include <asm/systemcfg.h>
+#include <asm/machdep.h>
extern void power4_idle(void);
-static int (*idle_loop)(void);
-
-#ifdef CONFIG_PPC_ISERIES
-static unsigned long maxYieldTime = 0;
-static unsigned long minYieldTime = 0xffffffffffffffffUL;
-
-static inline void process_iSeries_events(void)
-{
- asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
-}
-
-static void yield_shared_processor(void)
-{
- unsigned long tb;
- unsigned long yieldTime;
-
- HvCall_setEnabledInterrupts(HvCall_MaskIPI |
- HvCall_MaskLpEvent |
- HvCall_MaskLpProd |
- HvCall_MaskTimeout);
-
- tb = get_tb();
- /* Compute future tb value when yield should expire */
- HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
-
- yieldTime = get_tb() - tb;
- if (yieldTime > maxYieldTime)
- maxYieldTime = yieldTime;
-
- if (yieldTime < minYieldTime)
- minYieldTime = yieldTime;
-
- /*
- * The decrementer stops during the yield. Force a fake decrementer
- * here and let the timer_interrupt code sort out the actual time.
- */
- get_paca()->lppaca.int_dword.fields.decr_int = 1;
- process_iSeries_events();
-}
-
-static int iSeries_idle(void)
-{
- struct paca_struct *lpaca;
- long oldval;
-
- /* ensure iSeries run light will be out when idle */
- ppc64_runlatch_off();
-
- lpaca = get_paca();
-
- while (1) {
- if (lpaca->lppaca.shared_proc) {
- if (hvlpevent_is_pending())
- process_iSeries_events();
- if (!need_resched())
- yield_shared_processor();
- } else {
- oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
- if (!oldval) {
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while (!need_resched()) {
- HMT_medium();
- if (hvlpevent_is_pending())
- process_iSeries_events();
- HMT_low();
- }
-
- HMT_medium();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- } else {
- set_need_resched();
- }
- }
-
- ppc64_runlatch_on();
- schedule();
- ppc64_runlatch_off();
- }
-
- return 0;
-}
-
-#else
-
-static int default_idle(void)
+int default_idle(void)
{
long oldval;
unsigned int cpu = smp_processor_id();
@@ -134,7 +43,8 @@ static int default_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while (!need_resched() && !cpu_is_offline(cpu)) {
- barrier();
+ ppc64_runlatch_off();
+
/*
* Go into low thread priority and possibly
* low power mode.
@@ -149,6 +59,7 @@ static int default_idle(void)
set_need_resched();
}
+ ppc64_runlatch_on();
schedule();
if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
cpu_die();
@@ -157,127 +68,19 @@ static int default_idle(void)
return 0;
}
-#ifdef CONFIG_PPC_PSERIES
-
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
-
-int dedicated_idle(void)
+int native_idle(void)
{
- long oldval;
- struct paca_struct *lpaca = get_paca(), *ppaca;
- unsigned long start_snooze;
- unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
- unsigned int cpu = smp_processor_id();
-
- ppaca = &paca[cpu ^ 1];
-
while (1) {
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- lpaca->lppaca.idle = 1;
-
- oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
- if (!oldval) {
- set_thread_flag(TIF_POLLING_NRFLAG);
- start_snooze = __get_tb() +
- *smt_snooze_delay * tb_ticks_per_usec;
- while (!need_resched() && !cpu_is_offline(cpu)) {
- /*
- * Go into low thread priority and possibly
- * low power mode.
- */
- HMT_low();
- HMT_very_low();
-
- if (*smt_snooze_delay == 0 ||
- __get_tb() < start_snooze)
- continue;
-
- HMT_medium();
-
- if (!(ppaca->lppaca.idle)) {
- local_irq_disable();
-
- /*
- * We are about to sleep the thread
- * and so wont be polling any
- * more.
- */
- clear_thread_flag(TIF_POLLING_NRFLAG);
-
- /*
- * SMT dynamic mode. Cede will result
- * in this thread going dormant, if the
- * partner thread is still doing work.
- * Thread wakes up if partner goes idle,
- * an interrupt is presented, or a prod
- * occurs. Returning from the cede
- * enables external interrupts.
- */
- if (!need_resched())
- cede_processor();
- else
- local_irq_enable();
- } else {
- /*
- * Give the HV an opportunity at the
- * processor, since we are not doing
- * any work.
- */
- poll_pending();
- }
- }
-
- clear_thread_flag(TIF_POLLING_NRFLAG);
- } else {
- set_need_resched();
- }
-
- HMT_medium();
- lpaca->lppaca.idle = 0;
- schedule();
- if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
- cpu_die();
- }
- return 0;
-}
-
-static int shared_idle(void)
-{
- struct paca_struct *lpaca = get_paca();
- unsigned int cpu = smp_processor_id();
-
- while (1) {
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- lpaca->lppaca.idle = 1;
+ ppc64_runlatch_off();
- while (!need_resched() && !cpu_is_offline(cpu)) {
- local_irq_disable();
+ if (!need_resched())
+ power4_idle();
- /*
- * Yield the processor to the hypervisor. We return if
- * an external interrupt occurs (which are driven prior
- * to returning here) or if a prod occurs from another
- * processor. When returning here, external interrupts
- * are enabled.
- *
- * Check need_resched() again with interrupts disabled
- * to avoid a race.
- */
- if (!need_resched())
- cede_processor();
- else
- local_irq_enable();
+ if (need_resched()) {
+ ppc64_runlatch_on();
+ schedule();
}
- HMT_medium();
- lpaca->lppaca.idle = 0;
- schedule();
if (cpu_is_offline(smp_processor_id()) &&
system_state == SYSTEM_RUNNING)
cpu_die();
@@ -286,29 +89,10 @@ static int shared_idle(void)
return 0;
}
-#endif /* CONFIG_PPC_PSERIES */
-
-static int native_idle(void)
-{
- while(1) {
- /* check CPU type here */
- if (!need_resched())
- power4_idle();
- if (need_resched())
- schedule();
-
- if (cpu_is_offline(raw_smp_processor_id()) &&
- system_state == SYSTEM_RUNNING)
- cpu_die();
- }
- return 0;
-}
-
-#endif /* CONFIG_PPC_ISERIES */
-
void cpu_idle(void)
{
- idle_loop();
+ BUG_ON(NULL == ppc_md.idle_loop);
+ ppc_md.idle_loop();
}
int powersave_nap;
@@ -342,42 +126,3 @@ register_powersave_nap_sysctl(void)
}
__initcall(register_powersave_nap_sysctl);
#endif
-
-int idle_setup(void)
-{
- /*
- * Move that junk to each platform specific file, eventually define
- * a pSeries_idle for shared processor stuff
- */
-#ifdef CONFIG_PPC_ISERIES
- idle_loop = iSeries_idle;
- return 1;
-#else
- idle_loop = default_idle;
-#endif
-#ifdef CONFIG_PPC_PSERIES
- if (systemcfg->platform & PLATFORM_PSERIES) {
- if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
- if (get_paca()->lppaca.shared_proc) {
- printk(KERN_INFO "Using shared processor idle loop\n");
- idle_loop = shared_idle;
- } else {
- printk(KERN_INFO "Using dedicated idle loop\n");
- idle_loop = dedicated_idle;
- }
- } else {
- printk(KERN_INFO "Using default idle loop\n");
- idle_loop = default_idle;
- }
- }
-#endif /* CONFIG_PPC_PSERIES */
-#ifndef CONFIG_PPC_ISERIES
- if (systemcfg->platform == PLATFORM_POWERMAC ||
- systemcfg->platform == PLATFORM_MAPLE) {
- printk(KERN_INFO "Using native/NAP idle loop\n");
- idle_loop = native_idle;
- }
-#endif /* CONFIG_PPC_ISERIES */
-
- return 1;
-}
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 1d2ff6d6b0b..a3d519518fb 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -444,7 +444,7 @@ static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
-int __init arch_init(void)
+int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index da8900b51f4..bb55b5a5691 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -177,6 +177,8 @@ void __init maple_setup_arch(void)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
+
+ printk(KERN_INFO "Using native/NAP idle loop\n");
}
/*
@@ -297,4 +299,5 @@ struct machdep_calls __initdata maple_md = {
.get_rtc_time = maple_get_rtc_time,
.calibrate_decr = generic_calibrate_decr,
.progress = maple_progress,
+ .idle_loop = native_idle,
};
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index f3dea0c5a88..59f4f997381 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -1124,9 +1124,11 @@ _GLOBAL(sys_call_table32)
.llong .compat_sys_mq_getsetattr
.llong .compat_sys_kexec_load
.llong .sys32_add_key
- .llong .sys32_request_key
+ .llong .sys32_request_key /* 270 */
.llong .compat_sys_keyctl
.llong .compat_sys_waitid
+ .llong .sys32_ioprio_set
+ .llong .sys32_ioprio_get
.balign 8
_GLOBAL(sys_call_table)
@@ -1403,3 +1405,5 @@ _GLOBAL(sys_call_table)
.llong .sys_request_key /* 270 */
.llong .sys_keyctl
.llong .sys_waitid
+ .llong .sys_ioprio_set
+ .llong .sys_ioprio_get
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c
index 66bd5ab7c25..b80e81984ba 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/ppc64/kernel/of_device.c
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <asm/errno.h>
#include <asm/of_device.h>
@@ -15,20 +16,20 @@
* Used by a driver to check whether an of_device present in the
* system is in its list of supported devices.
*/
-const struct of_match * of_match_device(const struct of_match *matches,
+const struct of_device_id *of_match_device(const struct of_device_id *matches,
const struct of_device *dev)
{
if (!dev->node)
return NULL;
- while (matches->name || matches->type || matches->compatible) {
+ while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
int match = 1;
- if (matches->name && matches->name != OF_ANY_MATCH)
+ if (matches->name[0])
match &= dev->node->name
&& !strcmp(matches->name, dev->node->name);
- if (matches->type && matches->type != OF_ANY_MATCH)
+ if (matches->type[0])
match &= dev->node->type
&& !strcmp(matches->type, dev->node->type);
- if (matches->compatible && matches->compatible != OF_ANY_MATCH)
+ if (matches->compatible[0])
match &= device_is_compatible(dev->node,
matches->compatible);
if (match)
@@ -42,7 +43,7 @@ static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
{
struct of_device * of_dev = to_of_device(dev);
struct of_platform_driver * of_drv = to_of_platform_driver(drv);
- const struct of_match * matches = of_drv->match_table;
+ const struct of_device_id * matches = of_drv->match_table;
if (!matches)
return 0;
@@ -75,7 +76,7 @@ static int of_device_probe(struct device *dev)
int error = -ENODEV;
struct of_platform_driver *drv;
struct of_device *of_dev;
- const struct of_match *match;
+ const struct of_device_id *match;
drv = to_of_platform_driver(dev->driver);
of_dev = to_of_device(dev);
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 44d9af72d22..5bec956e44a 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -19,6 +19,7 @@
#undef DEBUG
#include <linux/config.h>
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -82,6 +83,9 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
extern void pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
+static int pseries_shared_idle(void);
+static int pseries_dedicated_idle(void);
+
static volatile void __iomem * chrp_int_ack_special;
struct mpic *pSeries_mpic;
@@ -229,6 +233,20 @@ static void __init pSeries_setup_arch(void)
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
vpa_init(boot_cpuid);
+
+ /* Choose an idle loop */
+ if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
+ if (get_paca()->lppaca.shared_proc) {
+ printk(KERN_INFO "Using shared processor idle loop\n");
+ ppc_md.idle_loop = pseries_shared_idle;
+ } else {
+ printk(KERN_INFO "Using dedicated idle loop\n");
+ ppc_md.idle_loop = pseries_dedicated_idle;
+ }
+ } else {
+ printk(KERN_INFO "Using default idle loop\n");
+ ppc_md.idle_loop = default_idle;
+ }
}
static int __init pSeries_init_panel(void)
@@ -418,6 +436,144 @@ static int __init pSeries_probe(int platform)
return 1;
}
+DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+
+static inline void dedicated_idle_sleep(unsigned int cpu)
+{
+ struct paca_struct *ppaca = &paca[cpu ^ 1];
+
+ /* Only sleep if the other thread is not idle */
+ if (!(ppaca->lppaca.idle)) {
+ local_irq_disable();
+
+ /*
+ * We are about to sleep the thread and so wont be polling any
+ * more.
+ */
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+
+ /*
+ * SMT dynamic mode. Cede will result in this thread going
+ * dormant, if the partner thread is still doing work. Thread
+ * wakes up if partner goes idle, an interrupt is presented, or
+ * a prod occurs. Returning from the cede enables external
+ * interrupts.
+ */
+ if (!need_resched())
+ cede_processor();
+ else
+ local_irq_enable();
+ } else {
+ /*
+ * Give the HV an opportunity at the processor, since we are
+ * not doing any work.
+ */
+ poll_pending();
+ }
+}
+
+static int pseries_dedicated_idle(void)
+{
+ long oldval;
+ struct paca_struct *lpaca = get_paca();
+ unsigned int cpu = smp_processor_id();
+ unsigned long start_snooze;
+ unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
+
+ while (1) {
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ lpaca->lppaca.idle = 1;
+
+ oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+ if (!oldval) {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ start_snooze = __get_tb() +
+ *smt_snooze_delay * tb_ticks_per_usec;
+
+ while (!need_resched() && !cpu_is_offline(cpu)) {
+ ppc64_runlatch_off();
+
+ /*
+ * Go into low thread priority and possibly
+ * low power mode.
+ */
+ HMT_low();
+ HMT_very_low();
+
+ if (*smt_snooze_delay != 0 &&
+ __get_tb() > start_snooze) {
+ HMT_medium();
+ dedicated_idle_sleep(cpu);
+ }
+
+ }
+
+ HMT_medium();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ } else {
+ set_need_resched();
+ }
+
+ lpaca->lppaca.idle = 0;
+ ppc64_runlatch_on();
+
+ schedule();
+
+ if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+ cpu_die();
+ }
+}
+
+static int pseries_shared_idle(void)
+{
+ struct paca_struct *lpaca = get_paca();
+ unsigned int cpu = smp_processor_id();
+
+ while (1) {
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ lpaca->lppaca.idle = 1;
+
+ while (!need_resched() && !cpu_is_offline(cpu)) {
+ local_irq_disable();
+ ppc64_runlatch_off();
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (which are driven prior
+ * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ *
+ * Check need_resched() again with interrupts disabled
+ * to avoid a race.
+ */
+ if (!need_resched())
+ cede_processor();
+ else
+ local_irq_enable();
+
+ HMT_medium();
+ }
+
+ lpaca->lppaca.idle = 0;
+ ppc64_runlatch_on();
+
+ schedule();
+
+ if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+ cpu_die();
+ }
+
+ return 0;
+}
+
struct machdep_calls __initdata pSeries_md = {
.probe = pSeries_probe,
.setup_arch = pSeries_setup_arch,
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index 6cf03d387b9..3013cdb5f93 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -186,6 +186,8 @@ void __init pmac_setup_arch(void)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
+
+ printk(KERN_INFO "Using native/NAP idle loop\n");
}
#ifdef CONFIG_SCSI
@@ -507,5 +509,6 @@ struct machdep_calls __initdata pmac_md = {
.calibrate_decr = pmac_calibrate_decr,
.feature_call = pmac_do_feature_call,
.progress = pmac_progress,
- .check_legacy_ioport = pmac_check_legacy_ioport
+ .check_legacy_ioport = pmac_check_legacy_ioport,
+ .idle_loop = native_idle,
};
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index d5e4866e9ac..d1b33f0b26c 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -96,7 +96,6 @@ extern void udbg_init_maple_realmode(void);
extern unsigned long klimit;
extern void mm_init_ppc64(void);
-extern int idle_setup(void);
extern void stab_initialize(unsigned long stab);
extern void htab_initialize(void);
extern void early_init_devtree(void *flat_dt);
@@ -1081,8 +1080,11 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
- /* Select the correct idle loop for the platform. */
- idle_setup();
+ /* Use the default idle loop if the platform hasn't provided one. */
+ if (NULL == ppc_md.idle_loop) {
+ ppc_md.idle_loop = default_idle;
+ printk(KERN_INFO "Using default idle loop\n");
+ }
paging_init();
ppc64_boot_msg(0x15, "Setup Done");
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index 118436e8085..206619080e6 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -30,47 +30,26 @@
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/aio.h>
-#include <linux/nfs_fs.h>
-#include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
-#include <linux/filter.h>
-#include <linux/highmem.h>
-#include <linux/highuid.h>
#include <linux/mman.h>
-#include <linux/ipv6.h>
#include <linux/in.h>
-#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
-#include <linux/dnotify.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
-#include <linux/aio_abi.h>
#include <linux/elf.h>
-#include <net/scm.h>
-#include <net/sock.h>
-
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/ipc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/semaphore.h>
-#include <asm/ppcdebug.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/systemcfg.h>
@@ -350,8 +329,6 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
return ret;
}
-
-/* These are here just in case some old sparc32 binary calls it. */
asmlinkage long sys32_pause(void)
{
current->state = TASK_INTERRUPTIBLE;
@@ -360,8 +337,6 @@ asmlinkage long sys32_pause(void)
return -ERESTARTNOHAND;
}
-
-
static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
{
long usec;
@@ -847,16 +822,6 @@ asmlinkage long sys32_getpgid(u32 pid)
}
-/* Note: it is necessary to treat which and who as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long sys32_getpriority(u32 which, u32 who)
-{
- return sys_getpriority((int)which, (int)who);
-}
-
/* Note: it is necessary to treat pid as an unsigned int,
* with the corresponding cast to a signed int to insure that the
@@ -1048,6 +1013,11 @@ asmlinkage long sys32_setpgid(u32 pid, u32 pgid)
return sys_setpgid((int)pid, (int)pgid);
}
+long sys32_getpriority(u32 which, u32 who)
+{
+ /* sign extend which and who */
+ return sys_getpriority((int)which, (int)who);
+}
long sys32_setpriority(u32 which, u32 who, u32 niceval)
{
@@ -1055,6 +1025,18 @@ long sys32_setpriority(u32 which, u32 who, u32 niceval)
return sys_setpriority((int)which, (int)who, (int)niceval);
}
+long sys32_ioprio_get(u32 which, u32 who)
+{
+ /* sign extend which and who */
+ return sys_ioprio_get((int)which, (int)who);
+}
+
+long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
+{
+ /* sign extend which, who and ioprio */
+ return sys_ioprio_set((int)which, (int)who, (int)ioprio);
+}
+
/* Note: it is necessary to treat newmask as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
@@ -1273,8 +1255,6 @@ long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
(u64)len_high << 32 | len_low, advice);
}
-extern asmlinkage long sys_timer_create(clockid_t, sigevent_t __user *, timer_t __user *);
-
long ppc32_timer_create(clockid_t clock,
struct compat_sigevent __user *ev32,
timer_t __user *timer_id)
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index 2f704a2cafb..02b8ac4e016 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -112,7 +112,6 @@ void ppc64_enable_pmcs(void)
unsigned long hid0;
#ifdef CONFIG_PPC_PSERIES
unsigned long set, reset;
- int ret;
#endif /* CONFIG_PPC_PSERIES */
/* Only need to enable them once */
@@ -145,11 +144,7 @@ void ppc64_enable_pmcs(void)
case PLATFORM_PSERIES_LPAR:
set = 1UL << 63;
reset = 0;
- ret = plpar_hcall_norets(H_PERFMON, set, reset);
- if (ret)
- printk(KERN_ERR "H_PERFMON call on cpu %u "
- "returned %d\n",
- smp_processor_id(), ret);
+ plpar_hcall_norets(H_PERFMON, set, reset);
break;
#endif /* CONFIG_PPC_PSERIES */
@@ -161,13 +156,6 @@ void ppc64_enable_pmcs(void)
/* instruct hypervisor to maintain PMCs */
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
get_paca()->lppaca.pmcregs_in_use = 1;
-
- /*
- * On SMT machines we have to set the run latch in the ctrl register
- * in order to make PMC6 spin.
- */
- if (cpu_has_feature(CPU_FTR_SMT))
- ppc64_runlatch_on();
#endif /* CONFIG_PPC_PSERIES */
}
diff --git a/arch/ppc64/kernel/vdso32/vdso32.lds.S b/arch/ppc64/kernel/vdso32/vdso32.lds.S
index 11290c902ba..6f87a916a39 100644
--- a/arch/ppc64/kernel/vdso32/vdso32.lds.S
+++ b/arch/ppc64/kernel/vdso32/vdso32.lds.S
@@ -40,9 +40,9 @@ SECTIONS
.gcc_except_table : { *(.gcc_except_table) }
.fixup : { *(.fixup) }
- .got ALIGN(4) : { *(.got.plt) *(.got) }
-
.dynamic : { *(.dynamic) } :text :dynamic
+ .got : { *(.got) }
+ .plt : { *(.plt) }
_end = .;
__end = .;