author	David S. Miller <davem@sunset.davemloft.net>	2006-02-01 15:55:21 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:11:32 -0800
commit	517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree	58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /include
parent	b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.  The trick is
that every TSB load/store is registered into a special instruction
patch section.  The default uses virtual addresses, and the patch
instructions use physical address load/stores.

We can't do this on all chips because only cheetah+ and later have
the physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
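This patch only adds the patch-section plumbing; the code that walks the
section is elsewhere in the series.  As a rough, hypothetical sketch (the
function name, __init placement, and call site are assumptions, not shown
in this diff), boot code on cheetah+ and later could rewrite each recorded
instruction in place before the TSB is first used:

	/* Hypothetical consumer of the .tsb_phys_patch section; not part of
	 * this diff.  Each entry records the address of a "661:" instruction
	 * and the physical-ASI replacement to write over it.
	 */
	static void __init tsb_phys_patch(void)
	{
		struct tsb_phys_patch_entry *p = &__tsb_phys_patch;

		while (p < &__tsb_phys_patch_end) {
			unsigned long addr = p->addr;

			/* Overwrite the virtual-ASI instruction with the
			 * physical-ASI variant, then flush the I-cache line
			 * so the new instruction is actually fetched.
			 */
			*(unsigned int *) addr = p->insn;
			wmb();
			__asm__ __volatile__("flush	%0"
					     : /* no outputs */
					     : "r" (addr));
			p++;
		}
	}

A second sketch after the diff below shows how the new __tsb_insert()
declaration ties the patched macros together.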
Diffstat (limited to 'include')
-rw-r--r--   include/asm-sparc64/mmu.h   3
-rw-r--r--   include/asm-sparc64/tsb.h   94
2 files changed, 91 insertions(+), 6 deletions(-)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 18f98edfbcd..55e622711b9 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,7 +97,8 @@ struct tsb {
unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
-extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte);
+extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+extern void tsb_flush(unsigned long ent, unsigned long tag);
typedef struct {
unsigned long sparc64_ctx_val;
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index f384565212f..44709cde561 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -44,7 +44,89 @@
#define TSB_MEMBAR membar #StoreStore
+/* Some cpus support physical address quad loads. We want to use
+ * those if possible so we don't need to hard-lock the TSB mapping
+ * into the TLB. We encode some instruction patching in order to
+ * support this.
+ *
+ * The kernel TSB is locked into the TLB by virtue of being in the
+ * kernel image, so we don't play these games for swapper_tsb access.
+ */
+#ifndef __ASSEMBLY__
+struct tsb_phys_patch_entry {
+ unsigned int addr;
+ unsigned int insn;
+};
+extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+#endif
+#define TSB_LOAD_QUAD(TSB, REG) \
+661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
+ .section .tsb_phys_patch, "ax"; \
+ .word 661b; \
+ ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \
+ .previous
+
+#define TSB_LOAD_TAG_HIGH(TSB, REG) \
+661: lduwa [TSB] ASI_N, REG; \
+ .section .tsb_phys_patch, "ax"; \
+ .word 661b; \
+ lduwa [TSB] ASI_PHYS_USE_EC, REG; \
+ .previous
+
+#define TSB_LOAD_TAG(TSB, REG) \
+661: ldxa [TSB] ASI_N, REG; \
+ .section .tsb_phys_patch, "ax"; \
+ .word 661b; \
+ ldxa [TSB] ASI_PHYS_USE_EC, REG; \
+ .previous
+
+#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
+661: casa [TSB] ASI_N, REG1, REG2; \
+ .section .tsb_phys_patch, "ax"; \
+ .word 661b; \
+ casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+ .previous
+
+#define TSB_CAS_TAG(TSB, REG1, REG2) \
+661: casxa [TSB] ASI_N, REG1, REG2; \
+ .section .tsb_phys_patch, "ax"; \
+ .word 661b; \
+ casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
+ .previous
+
+#define TSB_STORE(ADDR, VAL) \
+661: stxa VAL, [ADDR] ASI_N; \
+ .section .tsb_phys_patch, "ax"; \
+ .word 661b; \
+ stxa VAL, [ADDR] ASI_PHYS_USE_EC; \
+ .previous
+
#define TSB_LOCK_TAG(TSB, REG1, REG2) \
+99: TSB_LOAD_TAG_HIGH(TSB, REG1); \
+ sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
+ andcc REG1, REG2, %g0; \
+ bne,pn %icc, 99b; \
+ nop; \
+ TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \
+ cmp REG1, REG2; \
+ bne,pn %icc, 99b; \
+ nop; \
+ TSB_MEMBAR
+
+#define TSB_WRITE(TSB, TTE, TAG) \
+ add TSB, 0x8, TSB; \
+ TSB_STORE(TSB, TTE); \
+ sub TSB, 0x8, TSB; \
+ TSB_MEMBAR; \
+ TSB_STORE(TSB, TAG);
+
+#define KTSB_LOAD_QUAD(TSB, REG) \
+ ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG;
+
+#define KTSB_STORE(ADDR, VAL) \
+ stxa VAL, [ADDR] ASI_N;
+
+#define KTSB_LOCK_TAG(TSB, REG1, REG2) \
99: lduwa [TSB] ASI_N, REG1; \
sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
andcc REG1, REG2, %g0; \
@@ -56,10 +138,12 @@
nop; \
TSB_MEMBAR
-#define TSB_WRITE(TSB, TTE, TAG) \
- stx TTE, [TSB + 0x08]; \
- TSB_MEMBAR; \
- stx TAG, [TSB + 0x00];
+#define KTSB_WRITE(TSB, TTE, TAG) \
+ add TSB, 0x8, TSB; \
+ stxa TTE, [TSB] ASI_N; \
+ sub TSB, 0x8, TSB; \
+ TSB_MEMBAR; \
+ stxa TAG, [TSB] ASI_N;
/* Do a kernel page table walk. Leaves physical PTE pointer in
* REG1. Jumps to FAIL_LABEL on early page table walk termination.
@@ -157,7 +241,7 @@
and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \
add REG1, REG2, REG2; \
- ldda [REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \
+ KTSB_LOAD_QUAD(REG2, REG3); \
cmp REG3, TAG; \
be,a,pt %xcc, OK_LABEL; \
mov REG4, REG1;
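For a sense of how the patched macros are meant to be used, here is a
hedged sketch of a routine in the spirit of the __tsb_insert() declaration
from the mmu.h hunk above; the exact register assignments are assumptions,
not part of this diff.  It disables interrupts, wins the lock bit in the
tag high word, writes the PTE, and releases the lock by storing the final
tag:

	/* Hypothetical illustration only.  %o0 = TSB entry pointer
	 * (virtual or physical, depending on run-time patching),
	 * %o1 = tag, %o2 = pte.
	 */
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate	! block interrupts while holding lock
	TSB_LOCK_TAG(%o0, %g2, %g3)	! spin on TSB_TAG_LOCK_HIGH, then CAS
	TSB_WRITE(%o0, %o2, %o1)	! store pte, membar, store tag (unlock)
	wrpr	%o5, 0x0, %pstate	! restore previous interrupt state
	retl
	 nop

Because every TSB access in these macros goes through the 661: patch
points, the same routine works whether the TSB pointer it is handed is a
kernel virtual address or, after patching, a physical one.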