From 96e2844999f99878fc5b03b81ccaa60580005b81 Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Wed, 13 Jul 2005 01:11:42 -0700
Subject: [PATCH] ppc64: kill bitfields in ppc64 hash code

This patch removes the use of bitfield types from the ppc64 hash table
manipulation code.

Signed-off-by: David Gibson
Acked-by: Stephen Rothwell
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-ppc64/iSeries/HvCallHpt.h | 11 +++--
 include/asm-ppc64/machdep.h           |  6 +--
 include/asm-ppc64/mmu.h               | 83 ++++++++++++-----------------------
 3 files changed, 36 insertions(+), 64 deletions(-)
(limited to 'include/asm-ppc64')

diff --git a/include/asm-ppc64/iSeries/HvCallHpt.h b/include/asm-ppc64/iSeries/HvCallHpt.h
index 66f38222ff7..43a1969230b 100644
--- a/include/asm-ppc64/iSeries/HvCallHpt.h
+++ b/include/asm-ppc64/iSeries/HvCallHpt.h
@@ -77,27 +77,26 @@ static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
 	return compressedStatus;
 }
 
-static inline u64 HvCallHpt_findValid(HPTE *hpte, u64 vpn)
+static inline u64 HvCallHpt_findValid(hpte_t *hpte, u64 vpn)
 {
 	return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
 }
 
-static inline u64 HvCallHpt_findNextValid(HPTE *hpte, u32 hpteIndex,
+static inline u64 HvCallHpt_findNextValid(hpte_t *hpte, u32 hpteIndex,
 		u8 bitson, u8 bitsoff)
 {
 	return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
 			bitson, bitsoff);
 }
 
-static inline void HvCallHpt_get(HPTE *hpte, u32 hpteIndex)
+static inline void HvCallHpt_get(hpte_t *hpte, u32 hpteIndex)
 {
 	HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
 }
 
-static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, HPTE *hpte)
+static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
 {
-	HvCall4(HvCallHptAddValidate, hpteIndex, hBit, (*((u64 *)hpte)),
-			(*(((u64 *)hpte)+1)));
+	HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
 }
 
 #endif /* _HVCALLHPT_H */
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index 1e6ad482413..f0c1d2d9267 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -53,10 +53,8 @@ struct machdep_calls {
 	long		(*hpte_insert)(unsigned long hpte_group,
 				       unsigned long va,
 				       unsigned long prpn,
-				       int secondary,
-				       unsigned long hpteflags,
-				       int bolted,
-				       int large);
+				       unsigned long vflags,
+				       unsigned long rflags);
 	long		(*hpte_remove)(unsigned long hpte_group);
 	void		(*flush_hash_range)(unsigned long context,
 					    unsigned long number,
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index f373de5e3dd..3d07ddd11e3 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -60,6 +60,22 @@
 
 #define HPTES_PER_GROUP 8
 
+#define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
+#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
+#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
+#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
+#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
+#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
+
+#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
+#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
+#define HPTE_R_RPN_SHIFT	12
+#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
+#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
+#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+
 /* Values for PP (assumes Ks=0, Kp=1) */
 /* pp0 will always be 0 for linux */
 #define PP_RWXX	0	/* Supervisor read/write, User none */
@@ -69,54 +85,13 @@
 
 #ifndef __ASSEMBLY__
 
-/* Hardware Page Table Entry */
-typedef struct {
-	unsigned long avpn:57;	/* vsid | api == avpn */
-	unsigned long :     2;	/* Software use */
-	unsigned long bolted: 1; /* HPTE is "bolted" */
-	unsigned long lock: 1;	/* lock on pSeries SMP */
-	unsigned long l: 1;	/* Virtual page is large (L=1) or 4 KB (L=0) */
-	unsigned long h: 1;	/* Hash function identifier */
-	unsigned long v: 1;	/* Valid (v=1) or invalid (v=0) */
-} Hpte_dword0;
-
-typedef struct {
-	unsigned long pp0: 1;	/* Page protection bit 0 */
-	unsigned long ts: 1;	/* Tag set bit */
-	unsigned long rpn: 50;	/* Real page number */
-	unsigned long : 2;	/* Reserved */
-	unsigned long ac: 1;	/* Address compare */
-	unsigned long r: 1;	/* Referenced */
-	unsigned long c: 1;	/* Changed */
-	unsigned long w: 1;	/* Write-thru cache mode */
-	unsigned long i: 1;	/* Cache inhibited */
-	unsigned long m: 1;	/* Memory coherence required */
-	unsigned long g: 1;	/* Guarded */
-	unsigned long n: 1;	/* No-execute */
-	unsigned long pp: 2;	/* Page protection bits 1:2 */
-} Hpte_dword1;
-
-typedef struct {
-	char padding[6];	/* padding */
-	unsigned long :   6;	/* padding */
-	unsigned long flags: 10; /* HPTE flags */
-} Hpte_dword1_flags;
-
 typedef struct {
-	union {
-		unsigned long dword0;
-		Hpte_dword0   dw0;
-	} dw0;
-
-	union {
-		unsigned long dword1;
-		Hpte_dword1 dw1;
-		Hpte_dword1_flags flags;
-	} dw1;
-} HPTE;
+	unsigned long v;
+	unsigned long r;
+} hpte_t;
 
-extern HPTE *		htab_address;
-extern unsigned long	htab_hash_mask;
+extern hpte_t *htab_address;
+extern unsigned long htab_hash_mask;
 
 static inline unsigned long hpt_hash(unsigned long vpn, int large)
 {
@@ -181,18 +156,18 @@ static inline void tlbiel(unsigned long va)
 	asm volatile("ptesync": : :"memory");
 }
 
-static inline unsigned long slot2va(unsigned long avpn, unsigned long large,
-		unsigned long secondary, unsigned long slot)
+static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
 {
+	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
 	unsigned long va;
 
 	va = avpn << 23;
 
-	if (!large) {
+	if (! (hpte_v & HPTE_V_LARGE)) {
 		unsigned long vpi, pteg;
 
 		pteg = slot / HPTES_PER_GROUP;
-		if (secondary)
+		if (hpte_v & HPTE_V_SECONDARY)
 			pteg = ~pteg;
 
 		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
@@ -219,11 +194,11 @@ extern void hpte_init_iSeries(void);
 
 extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 				     unsigned long va, unsigned long prpn,
-				     int secondary, unsigned long hpteflags,
-				     int bolted, int large);
+				     unsigned long vflags,
+				     unsigned long rflags);
 extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			       unsigned long prpn, int secondary,
-			       unsigned long hpteflags, int bolted, int large);
+			       unsigned long prpn,
+			       unsigned long vflags, unsigned long rflags);
 
 #endif /* __ASSEMBLY__ */
-- cgit v1.2.3
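A note on what the conversion buys: the old Hpte_dword0/Hpte_dword1 bitfields
left bit placement and access width to the compiler, while the HPTE_V_*/HPTE_R_*
masks make every access an explicit mask operation on a plain unsigned long,
which can be read or written as one word. A minimal user-space sketch of the
new idiom (constants copied from the patch; hpte_v_flags() is an illustrative
helper, not kernel code):

#include <assert.h>

#define HPTE_V_BOLTED		0x0000000000000010UL
#define HPTE_V_LARGE		0x0000000000000004UL
#define HPTE_V_SECONDARY	0x0000000000000002UL
#define HPTE_V_VALID		0x0000000000000001UL

typedef struct {
	unsigned long v;	/* first hashtable dword */
	unsigned long r;	/* second hashtable dword */
} hpte_t;

/* Compose the flag bits that hpte_insert() now receives as "vflags",
 * replacing the old separate (secondary, bolted, large) int arguments. */
static unsigned long hpte_v_flags(int secondary, int bolted, int large)
{
	unsigned long v = 0;

	if (secondary)
		v |= HPTE_V_SECONDARY;
	if (bolted)
		v |= HPTE_V_BOLTED;
	if (large)
		v |= HPTE_V_LARGE;
	return v;
}

int main(void)
{
	hpte_t hpte = { .v = hpte_v_flags(0, 1, 0) | HPTE_V_VALID, .r = 0 };

	assert(hpte.v & HPTE_V_VALID);		/* was: hpte.dw0.dw0.v */
	assert(!(hpte.v & HPTE_V_SECONDARY));	/* was: hpte.dw0.dw0.h */
	return 0;
}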
From 7c9034735eccbf82608a4602c59aaf6053ea9416 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Tue, 26 Jul 2005 11:29:55 -0600
Subject: [PATCH] Add emergency_restart()

When the kernel is working well and we want to restart cleanly,
kernel_restart is the function to use.  But in many instances the kernel
wants to reboot when things are expected to be working very badly, such as
from panic or a software watchdog handler.

This patch adds the function emergency_restart() so that callers can be
clear what semantics they expect when calling restart.  emergency_restart()
is expected to be callable from interrupt context and possibly reliable in
even more trying circumstances.

This is an initial generic implementation for all architectures.

Signed-off-by: Eric W. Biederman
Signed-off-by: Linus Torvalds
---
 include/asm-ppc64/emergency-restart.h | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 include/asm-ppc64/emergency-restart.h
(limited to 'include/asm-ppc64')

diff --git a/include/asm-ppc64/emergency-restart.h b/include/asm-ppc64/emergency-restart.h
new file mode 100644
index 00000000000..108d8c48e42
--- /dev/null
+++ b/include/asm-ppc64/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
-- cgit v1.2.3
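The split in intent shows up at call sites. A hedged sketch (the watchdog
handler is hypothetical; emergency_restart() and kernel_restart() are the
entry points named in the message above):

#include <linux/reboot.h>

/* Illustrative software-watchdog expiry handler (not from this patch):
 * the system is presumed unhealthy, so take the path that must work
 * from interrupt context rather than the clean-shutdown machinery. */
static void example_watchdog_fire(void)
{
	emergency_restart();
}

/* An orderly, administrative reboot would instead use: */
static void example_orderly_reboot(void)
{
	kernel_restart(NULL);	/* NULL: default reboot command */
}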
From 533f08172e21521a74e15cdef8a13c929596d506 Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Wed, 27 Jul 2005 11:44:19 -0700
Subject: [PATCH] ppc64: dynamically allocate segment tables

PPC64 machines before Power4 need a segment table page allocated for each
CPU.  Currently these are allocated statically in a big array in head.S
for all CPUs.  The segment tables need to be in the first segment (so
do_stab_bolted doesn't take a recursive fault on the stab itself), but
other than that there are no constraints which require the stabs for the
secondary CPUs to be statically allocated.

This patch allocates segment tables dynamically during boot, using
lmb_alloc() to ensure they are within the first 256M segment.  This
reduces the kernel image size by 192k...

Tested on RS64 iSeries, POWER3 pSeries, and POWER5.

Signed-off-by: David Gibson
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-ppc64/mmu.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'include/asm-ppc64')

diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 3d07ddd11e3..88911803680 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -200,6 +200,8 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			       unsigned long prpn,
 			       unsigned long vflags, unsigned long rflags);
 
+extern void stabs_alloc(void);
+
 #endif /* __ASSEMBLY__ */
 
 /*
-- cgit v1.2.3
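Only the stabs_alloc() declaration is visible in this include/asm-ppc64 range;
the definition lands in arch/ppc64/mm/stab.c. A sketch of the allocation idea
the message describes (the paca field and the exact loop are assumptions, not
the patch text; the point is the max_addr argument to the boot-time allocator,
since 1 << SID_SHIFT == 256MB keeps the page inside the first segment):

#include <asm/lmb.h>
#include <asm/page.h>
#include <asm/mmu.h>

void stabs_alloc_sketch(void)
{
	int cpu;

	for_each_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue;	/* CPU 0's stab stays in head.S */

		/* One page, page-aligned, physically below 256MB */
		newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
					 1 << SID_SHIFT);
		paca[cpu].stab_addr = newstab + KERNELBASE; /* assumed */
	}
}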
From 488f84994c55927eef587a0827dc957c908a0bad Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Wed, 27 Jul 2005 11:44:21 -0700
Subject: [PATCH] ppc64: remove another fixed address constraint

Presently the LparMap, one of the structures the kernel shares with the
legacy iSeries hypervisor, has a fixed offset address in head.S.  This
patch changes this so the LparMap is a normally initialized structure,
without a fixed address.  This allows us to use macros to compute some of
the values in the structure, which wasn't previously possible because the
assembler always uses signed-% which gets the wrong answers for the
computations in question.

Unfortunately, a gcc bug means that doing this requires another structure
(hvReleaseData) to be initialized in asm instead of C, but on the whole
the result is cleaner than before.

Signed-off-by: David Gibson
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-ppc64/iSeries/HvReleaseData.h | 11 ++++++-----
 include/asm-ppc64/iSeries/LparMap.h       | 23 +++++++++++++++--------
 include/asm-ppc64/mmu.h                   |  3 +++
 3 files changed, 24 insertions(+), 13 deletions(-)
(limited to 'include/asm-ppc64')

diff --git a/include/asm-ppc64/iSeries/HvReleaseData.h b/include/asm-ppc64/iSeries/HvReleaseData.h
index 01a1f13ea4a..c8162e5ccb2 100644
--- a/include/asm-ppc64/iSeries/HvReleaseData.h
+++ b/include/asm-ppc64/iSeries/HvReleaseData.h
@@ -39,6 +39,11 @@
  * know that this PLIC does not support running an OS "that old".
  */
 
+#define HVREL_TAGSINACTIVE	0x8000
+#define HVREL_32BIT		0x4000
+#define HVREL_NOSHAREDPROCS	0x2000
+#define HVREL_NOHMT		0x1000
+
 struct HvReleaseData {
 	u32	xDesc;		/* Descriptor "HvRD" ebcdic	x00-x03 */
 	u16	xSize;		/* Size of this control block	x04-x05 */
@@ -46,11 +51,7 @@ struct HvReleaseData {
 	struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
 	u32	xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
 	u32	xRsvd1;		/* Reserved			x14-x17 */
-	u16	xTagsMode:1;	/* 0 == tags active, 1 == tags inactive */
-	u16	xAddressSize:1;	/* 0 == 64-bit, 1 == 32-bit */
-	u16	xNoSharedProcs:1; /* 0 == shared procs, 1 == no shared */
-	u16	xNoHMT:1;	/* 0 == allow HMT, 1 == no HMT */
-	u16	xRsvd2:12;	/* Reserved			x18-x19 */
+	u16	xFlags;
 	u16	xVrmIndex;	/* VRM Index of OS image	x1A-x1B */
 	u16	xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
 	u16	xMinCompatablePlicVrmIndex; /* Min PLIC levelP (hard) x1E-x1F */
diff --git a/include/asm-ppc64/iSeries/LparMap.h b/include/asm-ppc64/iSeries/LparMap.h
index 038e5df7e9f..5c32e38c1c0 100644
--- a/include/asm-ppc64/iSeries/LparMap.h
+++ b/include/asm-ppc64/iSeries/LparMap.h
@@ -49,19 +49,26 @@
  * entry to map the Esid to the Vsid.
  */
 
+#define HvEsidsToMap	2
+#define HvRangesToMap	1
+
 /* Hypervisor initially maps 32MB of the load area */
 #define HvPagesToMap	8192
 
 struct LparMap {
-	u64	xNumberEsids;	// Number of ESID/VSID pairs (1)
-	u64	xNumberRanges;	// Number of VA ranges to map (1)
-	u64	xSegmentTableOffs; // Page number within load area of seg table (0)
+	u64	xNumberEsids;	// Number of ESID/VSID pairs
+	u64	xNumberRanges;	// Number of VA ranges to map
+	u64	xSegmentTableOffs; // Page number within load area of seg table
 	u64	xRsvd[5];
-	u64	xKernelEsid;	// Esid used to map kernel load (0x0C00000000)
-	u64	xKernelVsid;	// Vsid used to map kernel load (0x0C00000000)
-	u64	xPages;		// Number of pages to be mapped (8192)
-	u64	xOffset;	// Offset from start of load area (0)
-	u64	xVPN;		// Virtual Page Number (0x000C000000000000)
+	struct {
+		u64	xKernelEsid;	// Esid used to map kernel load
+		u64	xKernelVsid;	// Vsid used to map kernel load
+	} xEsids[HvEsidsToMap];
+	struct {
+		u64	xPages;		// Number of pages to be mapped
+		u64	xOffset;	// Offset from start of load area
+		u64	xVPN;		// Virtual Page Number
+	} xRanges[HvRangesToMap];
 };
 
 extern struct LparMap	xLparMap;
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 88911803680..70348a85131 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -338,6 +338,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
 		| (ea >> SID_SHIFT));
 }
 
+#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
+#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))
+
 #endif /* __ASSEMBLY */
 
 #endif /* _PPC64_MMU_H_ */
-- cgit v1.2.3
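With KERNEL_VSID() usable from C, the LparMap can be a normally initialized
structure that the compiler builds at compile time instead of hand-assembled
words at a magic offset in head.S. A hedged sketch of such an initializer
(the real one lives in arch/ppc64/kernel/lparmap.c, outside this
include/asm-ppc64 view; STAB0_PAGE and VMALLOCBASE are assumed symbols):

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/iSeries/LparMap.h>

/* Illustrative: one ESID/VSID pair per mapped segment, with the VSIDs
 * computed by KERNEL_VSID() rather than hard-coded constants. Placed in
 * .text so it sits in the load area the hypervisor initially maps. */
const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
	.xNumberEsids = HvEsidsToMap,
	.xNumberRanges = HvRangesToMap,
	.xSegmentTableOffs = STAB0_PAGE,	/* assumed symbol */

	.xEsids = {
		{ .xKernelEsid = GET_ESID(KERNELBASE),
		  .xKernelVsid = KERNEL_VSID(KERNELBASE), },
		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
	},

	.xRanges = {
		{ .xPages = HvPagesToMap,
		  .xOffset = 0,
		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
		},
	},
};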