author    Linus Torvalds <torvalds@g5.osdl.org>    2006-01-16 20:56:49 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-01-16 20:56:49 -0800
commit    a1bc5cdf9f9550bd7e9e5d8400e95b164165b275 (patch)
tree      3d4e80bb369675765e39450c55c6140e1213da81 /arch
parent    8dca6f33f026dc8a7fc2710b78a7521e899bd611 (diff)
parent    859538763155814d217054eb6e3cdff71bdb4d89 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/perfmon.c               |   2
-rw-r--r--  arch/ia64/kernel/perfmon_montecito.h     | 269
-rw-r--r--  arch/ia64/mm/init.c                      |  36
-rw-r--r--  arch/ia64/pci/pci.c                      |  19
-rw-r--r--  arch/ia64/sn/include/xtalk/hubdev.h      |  10
-rw-r--r--  arch/ia64/sn/include/xtalk/xbow.h        | 206
-rw-r--r--  arch/ia64/sn/include/xtalk/xwidgetdev.h  |  46
-rw-r--r--  arch/ia64/sn/kernel/io_init.c            |  10
-rw-r--r--  arch/ia64/sn/kernel/irq.c                |  10
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c              |  18
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c       |  16
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c       |  44
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c  |  12
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c       |  28
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c        |  36
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c        |  68
16 files changed, 578 insertions, 252 deletions
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index bd87cb6b7a8..2ea4b39efff 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -628,9 +628,11 @@ static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count,
#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
+#include "perfmon_montecito.h"
#include "perfmon_generic.h"
static pmu_config_t *pmu_confs[]={
+ &pmu_conf_mont,
&pmu_conf_mck,
&pmu_conf_ita,
&pmu_conf_gen, /* must be last */
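
The table ordering above is the whole trick: the probe walks pmu_confs[] front to back and only reaches pmu_conf_gen if nothing more specific matched, which is why the generic entry must stay last and the new Montecito entry can simply be added at the front. A self-contained sketch of that kind of family-keyed lookup (illustrative only; apart from Montecito's pmu_family of 0x20 taken from perfmon_montecito.h below, the family values and names here are assumptions, not the perfmon.c code):

    /* Illustrative sketch of a front-to-back PMU probe with a generic fallback. */
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct pmu_probe_entry {
            const char *name;
            unsigned int family;    /* CPUID family; 0x20 is Montecito (see pmu_conf_mont) */
    };

    static const struct pmu_probe_entry pmu_table[] = {
            { "Montecito", 0x20 },
            { "McKinley",  0x1f },  /* assumed value, for illustration only */
            { "Itanium",   0x07 },  /* assumed value, for illustration only */
            { "Generic",   0x00 },  /* catch-all -- must stay last          */
    };

    static const char *probe_pmu_name(unsigned int cpu_family)
    {
            unsigned int i;

            /* walk the specific entries in order... */
            for (i = 0; i < ARRAY_SIZE(pmu_table) - 1; i++)
                    if (pmu_table[i].family == cpu_family)
                            return pmu_table[i].name;

            /* ...and fall back to the generic config if none matched */
            return pmu_table[ARRAY_SIZE(pmu_table) - 1].name;
    }
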
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
new file mode 100644
index 00000000000..cd06ac6a686
--- /dev/null
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -0,0 +1,269 @@
+/*
+ * This file contains the Montecito PMU register description tables
+ * and pmc checker used by perfmon.c.
+ *
+ * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
+ * Contributed by Stephane Eranian <eranian@hpl.hp.com>
+ */
+static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+
+#define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\
+ RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63))
+#define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36))
+#define RDEP_MONT_IEAR (RDEP(34)|RDEP(35))
+
+static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
+/* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}},
+/* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}},
+/* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}},
+/* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}},
+/* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}},
+/* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}},
+/* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}},
+/* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}},
+/* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}},
+/* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}},
+/* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}},
+/* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}},
+/* pmc16 */ { PFM_REG_NOTIMPL, },
+/* pmc17 */ { PFM_REG_NOTIMPL, },
+/* pmc18 */ { PFM_REG_NOTIMPL, },
+/* pmc19 */ { PFM_REG_NOTIMPL, },
+/* pmc20 */ { PFM_REG_NOTIMPL, },
+/* pmc21 */ { PFM_REG_NOTIMPL, },
+/* pmc22 */ { PFM_REG_NOTIMPL, },
+/* pmc23 */ { PFM_REG_NOTIMPL, },
+/* pmc24 */ { PFM_REG_NOTIMPL, },
+/* pmc25 */ { PFM_REG_NOTIMPL, },
+/* pmc26 */ { PFM_REG_NOTIMPL, },
+/* pmc27 */ { PFM_REG_NOTIMPL, },
+/* pmc28 */ { PFM_REG_NOTIMPL, },
+/* pmc29 */ { PFM_REG_NOTIMPL, },
+/* pmc30 */ { PFM_REG_NOTIMPL, },
+/* pmc31 */ { PFM_REG_NOTIMPL, },
+/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
+/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
+/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
+/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
+ { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
+};
+
+static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={
+/* pmd0 */ { PFM_REG_NOTIMPL, },
+/* pmd1 */ { PFM_REG_NOTIMPL, },
+/* pmd2 */ { PFM_REG_NOTIMPL, },
+/* pmd3 */ { PFM_REG_NOTIMPL, },
+/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}},
+/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}},
+/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}},
+/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}},
+/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}},
+/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}},
+/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}},
+/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}},
+/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}},
+/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}},
+/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}},
+/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}},
+/* pmd16 */ { PFM_REG_NOTIMPL, },
+/* pmd17 */ { PFM_REG_NOTIMPL, },
+/* pmd18 */ { PFM_REG_NOTIMPL, },
+/* pmd19 */ { PFM_REG_NOTIMPL, },
+/* pmd20 */ { PFM_REG_NOTIMPL, },
+/* pmd21 */ { PFM_REG_NOTIMPL, },
+/* pmd22 */ { PFM_REG_NOTIMPL, },
+/* pmd23 */ { PFM_REG_NOTIMPL, },
+/* pmd24 */ { PFM_REG_NOTIMPL, },
+/* pmd25 */ { PFM_REG_NOTIMPL, },
+/* pmd26 */ { PFM_REG_NOTIMPL, },
+/* pmd27 */ { PFM_REG_NOTIMPL, },
+/* pmd28 */ { PFM_REG_NOTIMPL, },
+/* pmd29 */ { PFM_REG_NOTIMPL, },
+/* pmd30 */ { PFM_REG_NOTIMPL, },
+/* pmd31 */ { PFM_REG_NOTIMPL, },
+/* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
+/* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
+/* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}},
+/* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}},
+/* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}},
+/* pmd37 */ { PFM_REG_NOTIMPL, },
+/* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd40 */ { PFM_REG_NOTIMPL, },
+/* pmd41 */ { PFM_REG_NOTIMPL, },
+/* pmd42 */ { PFM_REG_NOTIMPL, },
+/* pmd43 */ { PFM_REG_NOTIMPL, },
+/* pmd44 */ { PFM_REG_NOTIMPL, },
+/* pmd45 */ { PFM_REG_NOTIMPL, },
+/* pmd46 */ { PFM_REG_NOTIMPL, },
+/* pmd47 */ { PFM_REG_NOTIMPL, },
+/* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+/* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
+ { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
+};
+
+/*
+ * PMC reserved fields must have their power-up values preserved
+ */
+static int
+pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+{
+ unsigned long tmp1, tmp2, ival = *val;
+
+ /* remove reserved areas from user value */
+ tmp1 = ival & PMC_RSVD_MASK(cnum);
+
+ /* get reserved fields values */
+ tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
+
+ *val = tmp1 | tmp2;
+
+ DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
+ cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
+ return 0;
+}
+
+/*
+ * task can be NULL if the context is unloaded
+ */
+static int
+pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+{
+ int ret = 0;
+ unsigned long val32 = 0, val38 = 0, val41 = 0;
+ unsigned long tmpval;
+ int check_case1 = 0;
+ int is_loaded;
+
+ /* first preserve the reserved fields */
+ pfm_mont_reserved(cnum, val, regs);
+
+ tmpval = *val;
+
+ /* sanity check */
+ if (ctx == NULL) return -EINVAL;
+
+ is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
+
+ /*
+ * we must clear the debug registers if pmc41 has a value which enables
+ * memory pipeline event constraints. In this case we need to clear the
+ * debug registers if they have not yet been accessed. This is required
+ * to avoid picking up stale state.
+ * PMC41 is "active" if:
+ * one of the pmc41.cfg_dtagXX fields is different from 0x3
+ * AND
+ * the corresponding pmc41.en_dbrpXX bit is set
+ * AND
+ * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
+ */
+ DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
+
+ if (cnum == 41 && is_loaded
+ && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
+
+ DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
+
+ /* don't mix debug with perfmon */
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+
+ /*
+ * a count of 0 will mark the debug registers as in use and also
+ * ensure that they are properly cleared.
+ */
+ ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
+ if (ret) return ret;
+ }
+ /*
+ * we must clear the (instruction) debug registers if:
+ * pmc38.ig_ibrpX is 0 (enabled)
+ * AND
+ * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
+ */
+ if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) {
+
+ DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval));
+
+ /* don't mix debug with perfmon */
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+
+ /*
+ * a count of 0 will mark the debug registers as in use and also
+ * ensure that they are properly cleared.
+ */
+ ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
+ if (ret) return ret;
+
+ }
+ switch(cnum) {
+ case 32: val32 = *val;
+ val38 = ctx->ctx_pmcs[38];
+ val41 = ctx->ctx_pmcs[41];
+ check_case1 = 1;
+ break;
+ case 38: val38 = *val;
+ val32 = ctx->ctx_pmcs[32];
+ val41 = ctx->ctx_pmcs[41];
+ check_case1 = 1;
+ break;
+ case 41: val41 = *val;
+ val32 = ctx->ctx_pmcs[32];
+ val38 = ctx->ctx_pmcs[38];
+ check_case1 = 1;
+ break;
+ }
+ /* check for an illegal configuration which can produce inconsistencies
+ * in tagging i-side events in the L1D and L2 caches
+ */
+ if (check_case1) {
+ ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0)
+ && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0)
+ || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0));
+ if (ret) {
+ DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32));
+ return -EINVAL;
+ }
+ }
+ *val = tmpval;
+ return 0;
+}
+
+/*
+ * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
+ */
+static pmu_config_t pmu_conf_mont={
+ .pmu_name = "Montecito",
+ .pmu_family = 0x20,
+ .flags = PFM_PMU_IRQ_RESEND,
+ .ovfl_val = (1UL << 47) - 1,
+ .pmd_desc = pfm_mont_pmd_desc,
+ .pmc_desc = pfm_mont_pmc_desc,
+ .num_ibrs = 8,
+ .num_dbrs = 8,
+ .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
+};
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index e3215ba64ff..b38b6d213c1 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -635,3 +635,39 @@ mem_init (void)
ia32_mem_init();
#endif
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void online_page(struct page *page)
+{
+ ClearPageReserved(page);
+ set_page_count(page, 1);
+ __free_page(page);
+ totalram_pages++;
+ num_physpages++;
+}
+
+int add_memory(u64 start, u64 size)
+{
+ pg_data_t *pgdat;
+ struct zone *zone;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ pgdat = NODE_DATA(0);
+
+ zone = pgdat->node_zones + ZONE_NORMAL;
+ ret = __add_pages(zone, start_pfn, nr_pages);
+
+ if (ret)
+ printk("%s: Problem encountered in __add_pages() as ret=%d\n",
+ __FUNCTION__, ret);
+
+ return ret;
+}
+
+int remove_memory(u64 start, u64 size)
+{
+ return -EINVAL;
+}
+#endif
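
The add_memory() stub above only has to turn a byte range into page frame numbers before calling __add_pages(). A worked example of the shift arithmetic (the 16 KB page size and the addresses are assumptions chosen for illustration; ia64 PAGE_SHIFT is configurable):

    /* Worked example of the start/size -> pfn conversion used by add_memory(). */
    #include <stdio.h>

    int main(void)
    {
            unsigned int page_shift = 14;               /* assumed 16 KB pages          */
            unsigned long long start = 0x100000000ULL;  /* hypothetical hot-add at 4 GB */
            unsigned long long size  = 0x10000000ULL;   /* hypothetical 256 MB region   */

            unsigned long long start_pfn = start >> page_shift;  /* 0x40000 */
            unsigned long long nr_pages  = size  >> page_shift;  /* 0x4000  */

            printf("start_pfn=%#llx nr_pages=%#llx\n", start_pfn, nr_pages);
            return 0;
    }
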
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 30dbc98bf0b..d27ecdcb6fc 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -454,14 +454,13 @@ static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
return 0;
}
-static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
+static void __devinit
+pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
struct pci_bus_region region;
int i;
- int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? \
- PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
- for (i = 0; i < limit; i++) {
+ for (i = start; i < limit; i++) {
if (!dev->resource[i].flags)
continue;
region.start = dev->resource[i].start;
@@ -472,6 +471,16 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
}
}
+static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
+{
+ pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+}
+
+static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
+{
+ pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+}
+
/*
* Called after each bus is probed, but before its children are examined.
*/
@@ -482,7 +491,7 @@ pcibios_fixup_bus (struct pci_bus *b)
if (b->self) {
pci_read_bridge_bases(b);
- pcibios_fixup_device_resources(b->self);
+ pcibios_fixup_bridge_resources(b->self);
}
list_for_each_entry(dev, &b->devices, bus_list)
pcibios_fixup_device_resources(dev);
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 4d417c30120..7c88e9a5851 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -40,8 +40,8 @@ struct sn_flush_device_common {
unsigned long sfdl_force_int_addr;
unsigned long sfdl_flush_value;
volatile unsigned long *sfdl_flush_addr;
- uint32_t sfdl_persistent_busnum;
- uint32_t sfdl_persistent_segment;
+ u32 sfdl_persistent_busnum;
+ u32 sfdl_persistent_segment;
struct pcibus_info *sfdl_pcibus_info;
};
@@ -56,7 +56,7 @@ struct sn_flush_device_kernel {
*/
struct sn_flush_nasid_entry {
struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
- uint64_t iio_itte[8];
+ u64 iio_itte[8];
};
struct hubdev_info {
@@ -70,8 +70,8 @@ struct hubdev_info {
void *hdi_nodepda;
void *hdi_node_vertex;
- uint32_t max_segment_number;
- uint32_t max_pcibus_number;
+ u32 max_segment_number;
+ u32 max_pcibus_number;
};
extern void hubdev_init_node(nodepda_t *, cnodeid_t);
diff --git a/arch/ia64/sn/include/xtalk/xbow.h b/arch/ia64/sn/include/xtalk/xbow.h
index ec56b3432f1..90f37a4133d 100644
--- a/arch/ia64/sn/include/xtalk/xbow.h
+++ b/arch/ia64/sn/include/xtalk/xbow.h
@@ -3,7 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights
+ * Reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_XBOW_H
#define _ASM_IA64_SN_XTALK_XBOW_H
@@ -21,94 +22,94 @@
/* Register set for each xbow link */
typedef volatile struct xb_linkregs_s {
-/*
+/*
* we access these through synergy unswizzled space, so the address
* gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
* That's why we put the register first and filler second.
*/
- uint32_t link_ibf;
- uint32_t filler0; /* filler for proper alignment */
- uint32_t link_control;
- uint32_t filler1;
- uint32_t link_status;
- uint32_t filler2;
- uint32_t link_arb_upper;
- uint32_t filler3;
- uint32_t link_arb_lower;
- uint32_t filler4;
- uint32_t link_status_clr;
- uint32_t filler5;
- uint32_t link_reset;
- uint32_t filler6;
- uint32_t link_aux_status;
- uint32_t filler7;
+ u32 link_ibf;
+ u32 filler0; /* filler for proper alignment */
+ u32 link_control;
+ u32 filler1;
+ u32 link_status;
+ u32 filler2;
+ u32 link_arb_upper;
+ u32 filler3;
+ u32 link_arb_lower;
+ u32 filler4;
+ u32 link_status_clr;
+ u32 filler5;
+ u32 link_reset;
+ u32 filler6;
+ u32 link_aux_status;
+ u32 filler7;
} xb_linkregs_t;
typedef volatile struct xbow_s {
- /* standard widget configuration 0x000000-0x000057 */
- struct widget_cfg xb_widget; /* 0x000000 */
-
- /* helper fieldnames for accessing bridge widget */
-
-#define xb_wid_id xb_widget.w_id
-#define xb_wid_stat xb_widget.w_status
-#define xb_wid_err_upper xb_widget.w_err_upper_addr
-#define xb_wid_err_lower xb_widget.w_err_lower_addr
-#define xb_wid_control xb_widget.w_control
-#define xb_wid_req_timeout xb_widget.w_req_timeout
-#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
-#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
-#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
-#define xb_wid_llp xb_widget.w_llp_cfg
-#define xb_wid_stat_clr xb_widget.w_tflush
-
-/*
+ /* standard widget configuration 0x000000-0x000057 */
+ struct widget_cfg xb_widget; /* 0x000000 */
+
+ /* helper fieldnames for accessing bridge widget */
+
+#define xb_wid_id xb_widget.w_id
+#define xb_wid_stat xb_widget.w_status
+#define xb_wid_err_upper xb_widget.w_err_upper_addr
+#define xb_wid_err_lower xb_widget.w_err_lower_addr
+#define xb_wid_control xb_widget.w_control
+#define xb_wid_req_timeout xb_widget.w_req_timeout
+#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
+#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
+#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
+#define xb_wid_llp xb_widget.w_llp_cfg
+#define xb_wid_stat_clr xb_widget.w_tflush
+
+/*
* we access these through synergy unswizzled space, so the address
* gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
* That's why we put the register first and filler second.
*/
- /* xbow-specific widget configuration 0x000058-0x0000FF */
- uint32_t xb_wid_arb_reload; /* 0x00005C */
- uint32_t _pad_000058;
- uint32_t xb_perf_ctr_a; /* 0x000064 */
- uint32_t _pad_000060;
- uint32_t xb_perf_ctr_b; /* 0x00006c */
- uint32_t _pad_000068;
- uint32_t xb_nic; /* 0x000074 */
- uint32_t _pad_000070;
-
- /* Xbridge only */
- uint32_t xb_w0_rst_fnc; /* 0x00007C */
- uint32_t _pad_000078;
- uint32_t xb_l8_rst_fnc; /* 0x000084 */
- uint32_t _pad_000080;
- uint32_t xb_l9_rst_fnc; /* 0x00008c */
- uint32_t _pad_000088;
- uint32_t xb_la_rst_fnc; /* 0x000094 */
- uint32_t _pad_000090;
- uint32_t xb_lb_rst_fnc; /* 0x00009c */
- uint32_t _pad_000098;
- uint32_t xb_lc_rst_fnc; /* 0x0000a4 */
- uint32_t _pad_0000a0;
- uint32_t xb_ld_rst_fnc; /* 0x0000ac */
- uint32_t _pad_0000a8;
- uint32_t xb_le_rst_fnc; /* 0x0000b4 */
- uint32_t _pad_0000b0;
- uint32_t xb_lf_rst_fnc; /* 0x0000bc */
- uint32_t _pad_0000b8;
- uint32_t xb_lock; /* 0x0000c4 */
- uint32_t _pad_0000c0;
- uint32_t xb_lock_clr; /* 0x0000cc */
- uint32_t _pad_0000c8;
- /* end of Xbridge only */
- uint32_t _pad_0000d0[12];
-
- /* Link Specific Registers, port 8..15 0x000100-0x000300 */
- xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
-#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
-
+ /* xbow-specific widget configuration 0x000058-0x0000FF */
+ u32 xb_wid_arb_reload; /* 0x00005C */
+ u32 _pad_000058;
+ u32 xb_perf_ctr_a; /* 0x000064 */
+ u32 _pad_000060;
+ u32 xb_perf_ctr_b; /* 0x00006c */
+ u32 _pad_000068;
+ u32 xb_nic; /* 0x000074 */
+ u32 _pad_000070;
+
+ /* Xbridge only */
+ u32 xb_w0_rst_fnc; /* 0x00007C */
+ u32 _pad_000078;
+ u32 xb_l8_rst_fnc; /* 0x000084 */
+ u32 _pad_000080;
+ u32 xb_l9_rst_fnc; /* 0x00008c */
+ u32 _pad_000088;
+ u32 xb_la_rst_fnc; /* 0x000094 */
+ u32 _pad_000090;
+ u32 xb_lb_rst_fnc; /* 0x00009c */
+ u32 _pad_000098;
+ u32 xb_lc_rst_fnc; /* 0x0000a4 */
+ u32 _pad_0000a0;
+ u32 xb_ld_rst_fnc; /* 0x0000ac */
+ u32 _pad_0000a8;
+ u32 xb_le_rst_fnc; /* 0x0000b4 */
+ u32 _pad_0000b0;
+ u32 xb_lf_rst_fnc; /* 0x0000bc */
+ u32 _pad_0000b8;
+ u32 xb_lock; /* 0x0000c4 */
+ u32 _pad_0000c0;
+ u32 xb_lock_clr; /* 0x0000cc */
+ u32 _pad_0000c8;
+ /* end of Xbridge only */
+ u32 _pad_0000d0[12];
+
+ /* Link Specific Registers, port 8..15 0x000100-0x000300 */
+ xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
} xbow_t;
+#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
+
#define XB_FLAGS_EXISTS 0x1 /* device exists */
#define XB_FLAGS_MASTER 0x2
#define XB_FLAGS_SLAVE 0x0
@@ -160,7 +161,7 @@ typedef volatile struct xbow_s {
/* End of Xbridge only */
/* used only in ide, but defined here within the reserved portion */
-/* of the widget0 address space (before 0xf4) */
+/* of the widget0 address space (before 0xf4) */
#define XBOW_WID_UNDEF 0xe4
/* xbow link register set base, legal value for x is 0x8..0xf */
@@ -179,29 +180,37 @@ typedef volatile struct xbow_s {
/* link_control(x) */
#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
- /* reserved: 0x40000000 */
+/* reserved: 0x40000000 */
#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
-#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */
-#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */
-#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */
-#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */
-#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */
-#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */
-#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */
- /* reserved: 0x0000fe00 */
+#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer
+ level */
+#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8
+ bit mode */
+#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP
+ packet */
+#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit
+ mask */
+#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit
+ shift */
+#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination
+ */
+#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input
+ buffer */
+/* reserved: 0x0000fe00 */
#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
#define XB_CTRL_RCV_IE 0x00000010 /* receive */
#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
- /* reserved: 0x00000004 */
-#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */
+/* reserved: 0x00000004 */
+#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request
+ timeout */
#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
/* link_status(x) */
#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
- /* reserved: 0x7ff80000 */
+/* reserved: 0x7ff80000 */
#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
@@ -211,7 +220,7 @@ typedef volatile struct xbow_s {
#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
- /* reserved: 0x00000004 */
+/* reserved: 0x00000004 */
#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
@@ -222,7 +231,7 @@ typedef volatile struct xbow_s {
#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
#define XB_AUX_STAT_PRESENT 0x00000020
#define XB_AUX_STAT_PORT_WIDTH 0x00000010
- /* reserved: 0x0000000f */
+/* reserved: 0x0000000f */
/*
* link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
@@ -238,7 +247,8 @@ typedef volatile struct xbow_s {
/* XBOW_WID_STAT */
#define XB_WID_STAT_LINK_INTR_SHFT (24)
#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
-#define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
+#define XB_WID_STAT_LINK_INTR(x) \
+ (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
#define XB_WID_STAT_WIDGET0_INTR 0x00800000
#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
#define XB_WID_STAT_REG_ACC_ERR 0x00000020
@@ -264,7 +274,7 @@ typedef volatile struct xbow_s {
#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
#define XBOW_WIDGET_MFGR_NUM 0x0
#define XXBOW_WIDGET_MFGR_NUM 0x0
-#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
+#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
@@ -279,13 +289,13 @@ typedef volatile struct xbow_s {
#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
#define IS_XBRIDGE_XBOW(wid) \
- (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
- XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+ (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
#define IS_PIC_XBOW(wid) \
- (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
- XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+ (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
-#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
+#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
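
The XB_WID_STAT_LINK_INTR(x) macro above selects one of the per-link interrupt bits packed into bits 31:24 of the widget status register; expanding it for a sample port makes the bit math explicit (the port number 0xa is just an example; link ports run 0x8..0xf per the header):

    /* Worked expansion of XB_WID_STAT_LINK_INTR(x) for an example link port 0xa. */
    #define LINK_INTR_SHFT 24                       /* XB_WID_STAT_LINK_INTR_SHFT */
    #define LINK_INTR(x)   (0x1 << (((x) & 7) + LINK_INTR_SHFT))

    /* (0xa & 7) = 2, 2 + 24 = 26, so port 0xa maps to bit 26: */
    static const unsigned int port_a_intr = LINK_INTR(0xa);  /* 0x04000000, within 0xFF << 24 */
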
diff --git a/arch/ia64/sn/include/xtalk/xwidgetdev.h b/arch/ia64/sn/include/xtalk/xwidgetdev.h
index c5f4bc5cc03..2800eda0fd6 100644
--- a/arch/ia64/sn/include/xtalk/xwidgetdev.h
+++ b/arch/ia64/sn/include/xtalk/xwidgetdev.h
@@ -25,28 +25,28 @@
/* widget configuration registers */
struct widget_cfg{
- uint32_t w_id; /* 0x04 */
- uint32_t w_pad_0; /* 0x00 */
- uint32_t w_status; /* 0x0c */
- uint32_t w_pad_1; /* 0x08 */
- uint32_t w_err_upper_addr; /* 0x14 */
- uint32_t w_pad_2; /* 0x10 */
- uint32_t w_err_lower_addr; /* 0x1c */
- uint32_t w_pad_3; /* 0x18 */
- uint32_t w_control; /* 0x24 */
- uint32_t w_pad_4; /* 0x20 */
- uint32_t w_req_timeout; /* 0x2c */
- uint32_t w_pad_5; /* 0x28 */
- uint32_t w_intdest_upper_addr; /* 0x34 */
- uint32_t w_pad_6; /* 0x30 */
- uint32_t w_intdest_lower_addr; /* 0x3c */
- uint32_t w_pad_7; /* 0x38 */
- uint32_t w_err_cmd_word; /* 0x44 */
- uint32_t w_pad_8; /* 0x40 */
- uint32_t w_llp_cfg; /* 0x4c */
- uint32_t w_pad_9; /* 0x48 */
- uint32_t w_tflush; /* 0x54 */
- uint32_t w_pad_10; /* 0x50 */
+ u32 w_id; /* 0x04 */
+ u32 w_pad_0; /* 0x00 */
+ u32 w_status; /* 0x0c */
+ u32 w_pad_1; /* 0x08 */
+ u32 w_err_upper_addr; /* 0x14 */
+ u32 w_pad_2; /* 0x10 */
+ u32 w_err_lower_addr; /* 0x1c */
+ u32 w_pad_3; /* 0x18 */
+ u32 w_control; /* 0x24 */
+ u32 w_pad_4; /* 0x20 */
+ u32 w_req_timeout; /* 0x2c */
+ u32 w_pad_5; /* 0x28 */
+ u32 w_intdest_upper_addr; /* 0x34 */
+ u32 w_pad_6; /* 0x30 */
+ u32 w_intdest_lower_addr; /* 0x3c */
+ u32 w_pad_7; /* 0x38 */
+ u32 w_err_cmd_word; /* 0x44 */
+ u32 w_pad_8; /* 0x40 */
+ u32 w_llp_cfg; /* 0x4c */
+ u32 w_pad_9; /* 0x48 */
+ u32 w_tflush; /* 0x54 */
+ u32 w_pad_10; /* 0x50 */
};
/*
@@ -63,7 +63,7 @@ struct xwidget_info{
struct xwidget_hwid xwi_hwid; /* Widget Identification */
char xwi_masterxid; /* Hub's Widget Port Number */
void *xwi_hubinfo; /* Hub's provider private info */
- uint64_t *xwi_hub_provider; /* prom provider functions */
+ u64 *xwi_hub_provider; /* prom provider functions */
void *xwi_vertex;
};
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 258d9d7aff9..233d55115d3 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -132,8 +132,8 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
-sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
- u64 sn_irq_info)
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+ u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
@@ -141,7 +141,7 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
- (u64) segment, (u64) bus_number, (u64) devfn,
+ (u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
@@ -268,7 +268,7 @@ static void sn_fixup_ionodes(void)
*/
static void
sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
- int64_t * pci_addrs)
+ s64 * pci_addrs)
{
struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
unsigned int i;
@@ -328,7 +328,7 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
struct pci_bus *host_pci_bus;
struct pci_dev *host_pci_dev;
struct pcidev_info *pcidev_info;
- int64_t pci_addrs[PCI_ROM_RESOURCE + 1];
+ s64 pci_addrs[PCI_ROM_RESOURCE + 1];
struct sn_irq_info *sn_irq_info;
unsigned long size;
unsigned int bus_no, devfn;
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 01d18b7b5bb..ec37084bdc1 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -28,7 +28,7 @@ extern int sn_ioif_inited;
static struct list_head **sn_irq_lh;
static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
-static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
+static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
u64 sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
@@ -123,7 +123,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
sn_irq_lh[irq], list) {
- uint64_t bridge;
+ u64 bridge;
int local_widget, status;
nasid_t local_nasid;
struct sn_irq_info *new_irq_info;
@@ -134,7 +134,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
break;
memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
- bridge = (uint64_t) new_irq_info->irq_bridge;
+ bridge = (u64) new_irq_info->irq_bridge;
if (!bridge) {
kfree(new_irq_info);
break; /* irq is not a device interrupt */
@@ -349,10 +349,10 @@ static void force_interrupt(int irq)
*/
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
- uint64_t regval;
+ u64 regval;
int irr_reg_num;
int irr_bit;
- uint64_t irr_reg;
+ u64 irr_reg;
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 6a7939b16a1..d263d3e8fbb 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -245,7 +245,7 @@ static int cx_device_reload(struct cx_dev *cx_dev)
cx_dev->bt);
}
-static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
+static inline u64 tiocx_intr_alloc(nasid_t nasid, int widget,
u64 sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
@@ -302,7 +302,7 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
{
- uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+ u64 bridge = (u64) sn_irq_info->irq_bridge;
nasid_t nasid = NASID_GET(bridge);
int widget;
@@ -313,12 +313,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
}
}
-uint64_t tiocx_dma_addr(uint64_t addr)
+u64 tiocx_dma_addr(u64 addr)
{
return PHYS_TO_TIODMA(addr);
}
-uint64_t tiocx_swin_base(int nasid)
+u64 tiocx_swin_base(int nasid)
{
return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
}
@@ -335,8 +335,8 @@ EXPORT_SYMBOL(tiocx_swin_base);
static void tio_conveyor_set(nasid_t nasid, int enable_flag)
{
- uint64_t ice_frz;
- uint64_t disable_cb = (1ull << 61);
+ u64 ice_frz;
+ u64 disable_cb = (1ull << 61);
if (!(nasid & 1))
return;
@@ -388,7 +388,7 @@ static int is_fpga_tio(int nasid, int *bt)
static int bitstream_loaded(nasid_t nasid)
{
- uint64_t cx_credits;
+ u64 cx_credits;
cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3);
cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK;
@@ -404,14 +404,14 @@ static int tiocx_reload(struct cx_dev *cx_dev)
nasid_t nasid = cx_dev->cx_id.nasid;
if (bitstream_loaded(nasid)) {
- uint64_t cx_id;
+ u64 cx_id;
int rv;
rv = ia64_sn_sysctl_tio_clock_reset(nasid);
if (rv) {
printk(KERN_ALERT "CX port JTAG reset failed.\n");
} else {
- cx_id = *(volatile uint64_t *)
+ cx_id = *(volatile u64 *)
(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
WIDGET_ID);
part_num = XWIDGET_PART_NUM(cx_id);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index d1647b863e6..aa3fa5152a3 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -18,10 +18,10 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
* mark_ate: Mark the ate as either free or inuse.
*/
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
- uint64_t value)
+ u64 value)
{
- uint64_t *ate = ate_resource->ate;
+ u64 *ate = ate_resource->ate;
int index;
int length = 0;
@@ -38,7 +38,7 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
- uint64_t *ate = ate_resource->ate;
+ u64 *ate = ate_resource->ate;
int index;
int start_free;
@@ -119,7 +119,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
int status = 0;
- uint64_t flag;
+ u64 flag;
flag = pcibr_lock(pcibus_info);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
@@ -139,7 +139,7 @@ int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
-static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
+static inline u64 *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -153,7 +153,7 @@ static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
*/
void inline
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
- volatile uint64_t ate)
+ volatile u64 ate)
{
while (count-- > 0) {
if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -171,9 +171,9 @@ ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{
- volatile uint64_t ate;
+ volatile u64 ate;
int count;
- uint64_t flags;
+ u64 flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index e68332d9317..54ce5b7ceed 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,21 +41,21 @@ extern int sn_ioif_inited;
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
- uint64_t paddr, size_t req_size, uint64_t flags)
+ u64 paddr, size_t req_size, u64 flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
- uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
+ u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
pdi_linux_pcidev->devfn)) - 1;
int ate_count;
int ate_index;
- uint64_t ate_flags = flags | PCI32_ATE_V;
- uint64_t ate;
- uint64_t pci_addr;
- uint64_t xio_addr;
- uint64_t offset;
+ u64 ate_flags = flags | PCI32_ATE_V;
+ u64 ate;
+ u64 pci_addr;
+ u64 xio_addr;
+ u64 offset;
/* PIC in PCI-X mode does not support 32bit PageMap mode */
if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
@@ -109,12 +109,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
}
static dma_addr_t
-pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
- uint64_t dma_attributes)
+pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
+ u64 dma_attributes)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
- uint64_t pci_addr;
+ u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
@@ -127,7 +127,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
/* Handle Bridge Chipset differences */
if (IS_PIC_SOFT(pcibus_info)) {
pci_addr |=
- ((uint64_t) pcibus_info->
+ ((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
@@ -142,17 +142,17 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
- uint64_t paddr, size_t req_size, uint64_t flags)
+ u64 paddr, size_t req_size, u64 flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
- uint64_t xio_addr;
+ u64 xio_addr;
- uint64_t xio_base;
- uint64_t offset;
- uint64_t endoff;
+ u64 xio_base;
+ u64 offset;
+ u64 endoff;
if (IS_PCIX(pcibus_info)) {
return 0;
@@ -209,14 +209,14 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
* unlike the PIC Device(x) Write Request Buffer Flush register.
*/
-void sn_dma_flush(uint64_t addr)
+void sn_dma_flush(u64 addr)
{
nasid_t nasid;
int is_tio;
int wid_num;
int i, j;
- uint64_t flags;
- uint64_t itte;
+ u64 flags;
+ u64 itte;
struct hubdev_info *hubinfo;
volatile struct sn_flush_device_kernel *p;
volatile struct sn_flush_device_common *common;
@@ -299,8 +299,8 @@ void sn_dma_flush(uint64_t addr)
* If CE ever needs the sn_dma_flush mechanism, we will have
* to account for that here and in tioce_bus_fixup().
*/
- uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
- uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
+ u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
+ u32 revnum = XWIDGET_PART_REV_NUM(tio_id);
/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
@@ -315,7 +315,7 @@ void sn_dma_flush(uint64_t addr)
*common->sfdl_flush_addr = 0;
/* force an interrupt. */
- *(volatile uint32_t *)(common->sfdl_force_int_addr) = 1;
+ *(volatile u32 *)(common->sfdl_force_int_addr) = 1;
/* wait for the interrupt to come back. */
while (*(common->sfdl_flush_addr) != 0x10f)
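
The tail of sn_dma_flush() above is a three-step handshake with the bridge: arm the flush word, force an interrupt through the bridge, and spin until the flush path writes 0x10f back. A condensed sketch of that handshake, with the hardware addresses modeled as plain volatile pointers (an illustration, not the driver's actual types):

    /* Condensed model of the sn_dma_flush() completion handshake. */
    static void flush_handshake(volatile unsigned long *flush_addr,
                                volatile unsigned int *force_int_addr)
    {
            *flush_addr = 0;         /* arm the completion word                   */
            *force_int_addr = 1;     /* force an interrupt on the bridge          */

            while (*flush_addr != 0x10f)
                    ;                /* wait for the interrupt to post completion */
    }
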
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index e328e948175..77a1262751d 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -23,7 +23,7 @@ int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
{
struct ia64_sal_retval ret_stuff;
- uint64_t busnum;
+ u64 busnum;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
@@ -40,7 +40,7 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
void *resp)
{
struct ia64_sal_retval ret_stuff;
- uint64_t busnum;
+ u64 busnum;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
@@ -56,7 +56,7 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
struct ia64_sal_retval ret_stuff;
- uint64_t busnum;
+ u64 busnum;
int segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
@@ -159,9 +159,9 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
- kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
+ kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
memset(soft->pbi_int_ate_resource.ate, 0,
- (soft->pbi_int_ate_size * sizeof(uint64_t)));
+ (soft->pbi_int_ate_size * sizeof(u64)));
if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
/* TIO PCI Bridge: find nearest node with CPUs */
@@ -203,7 +203,7 @@ void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
- uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
+ u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 79fdb91d725..8b8bbd51d43 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -23,7 +23,7 @@ union br_ptr {
/*
* Control Register Access -- Read/Write 0000_0020
*/
-void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
@@ -43,7 +43,7 @@ void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
}
}
-void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
@@ -66,10 +66,10 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
-uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
+u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t ret = 0;
+ u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
@@ -96,10 +96,10 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
-uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t ret = 0;
+ u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
@@ -121,7 +121,7 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
-void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
@@ -141,7 +141,7 @@ void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
}
}
-void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
@@ -165,7 +165,7 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
- uint64_t addr)
+ u64 addr)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
@@ -217,10 +217,10 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
-uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t ret = 0;
+ u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
@@ -242,7 +242,7 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
}
void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
- uint64_t val)
+ u64 val)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
@@ -262,10 +262,10 @@ void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
}
}
-uint64_t __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t __iomem *ret = NULL;
+ u64 __iomem *ret = NULL;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 27aa1842dac..7571a402552 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -16,7 +16,7 @@
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
-uint32_t tioca_gart_found;
+u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
LIST_HEAD(tioca_list);
@@ -34,8 +34,8 @@ static int tioca_gart_init(struct tioca_kernel *);
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
- uint64_t ap_reg;
- uint64_t offset;
+ u64 ap_reg;
+ u64 offset;
struct page *tmp;
struct tioca_common *tioca_common;
struct tioca __iomem *ca_base;
@@ -214,7 +214,7 @@ void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
int cap_ptr;
- uint32_t reg;
+ u32 reg;
struct tioca __iomem *tioca_base;
struct pci_dev *pdev;
struct tioca_common *common;
@@ -276,7 +276,7 @@ EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
* We will always use 0x1
* 55:55 - Swap bytes Currently unused
*/
-static uint64_t
+static u64
tioca_dma_d64(unsigned long paddr)
{
dma_addr_t bus_addr;
@@ -318,15 +318,15 @@ tioca_dma_d64(unsigned long paddr)
* and so a given CA can only directly target nodes in the range
* xxx - xxx+255.
*/
-static uint64_t
-tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
+static u64
+tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
struct tioca_common *tioca_common;
struct tioca __iomem *ca_base;
- uint64_t ct_addr;
+ u64 ct_addr;
dma_addr_t bus_addr;
- uint32_t node_upper;
- uint64_t agp_dma_extn;
+ u32 node_upper;
+ u64 agp_dma_extn;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
@@ -367,10 +367,10 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
* dma_addr_t is guaranteed to be contiguous in CA bus space.
*/
static dma_addr_t
-tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
+tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
{
int i, ps, ps_shift, entry, entries, mapsize, last_entry;
- uint64_t xio_addr, end_xio_addr;
+ u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
dma_addr_t bus_addr = 0;
@@ -514,10 +514,10 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* The mapping mode used is based on the device's dma_mask. As a last resort
* use the GART mapped mode.
*/
-static uint64_t
-tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+static u64
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
{
- uint64_t mapaddr;
+ u64 mapaddr;
/*
* If the card is 64 or 48 bit addressable, use a direct mapping. 32
@@ -554,8 +554,8 @@ tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
{
struct tioca_common *soft = arg;
struct ia64_sal_retval ret_stuff;
- uint64_t segment;
- uint64_t busnum;
+ u64 segment;
+ u64 busnum;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
@@ -620,7 +620,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
tioca_kern->ca_closest_node =
nasid_to_cnodeid(tioca_common->ca_closest_nasid);
- tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
+ tioca_common->ca_kernel_private = (u64) tioca_kern;
bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
tioca_common->ca_common.bs_persist_busnum);
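
tioca_dma_map() above chooses its mapping mode from the device's dma_mask: a fully 64-bit capable card takes the direct d64 path, a 48-bit card the node-limited d48 path, and anything narrower falls back to the GART. A hedged sketch of that dispatch (the specific mask tests here are assumptions; only the d64/d48/GART ordering comes from the code and comments above):

    /* Illustrative dispatch only -- the real tioca_dma_map() tests differ in detail. */
    enum tioca_map_mode { MAP_D64, MAP_D48, MAP_GART };

    static enum tioca_map_mode pick_map_mode(unsigned long long dma_mask)
    {
            if (dma_mask == ~0ULL)                  /* fully 64-bit addressable card */
                    return MAP_D64;
            if (dma_mask >= ((1ULL << 48) - 1))     /* assumed 48-bit threshold      */
                    return MAP_D48;
            return MAP_GART;                        /* last resort: GART mapped mode */
    }
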
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index dda196c9e32..e52831ed93e 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -81,10 +81,10 @@
* 61 - 0 since this is not an MSI transaction
* 60:54 - reserved, MBZ
*/
-static uint64_t
+static u64
tioce_dma_d64(unsigned long ct_addr)
{
- uint64_t bus_addr;
+ u64 bus_addr;
bus_addr = ct_addr | (1UL << 63);
@@ -141,9 +141,9 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
* length, and if enough resources exist, fill in the ATE's and construct a
* tioce_dmamap struct to track the mapping.
*/
-static uint64_t
+static u64
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
- uint64_t ct_addr, int len)
+ u64 ct_addr, int len)
{
int i;
int j;
@@ -152,11 +152,11 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
int entries;
int nates;
int pagesize;
- uint64_t *ate_shadow;
- uint64_t *ate_reg;
- uint64_t addr;
+ u64 *ate_shadow;
+ u64 *ate_reg;
+ u64 addr;
struct tioce *ce_mmr;
- uint64_t bus_base;
+ u64 bus_base;
struct tioce_dmamap *map;
ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
@@ -224,7 +224,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
addr = ct_addr;
for (j = 0; j < nates; j++) {
- uint64_t ate;
+ u64 ate;
ate = ATE_MAKE(addr, pagesize);
ate_shadow[i + j] = ate;
@@ -252,15 +252,15 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
*
* Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
*/
-static uint64_t
-tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
+static u64
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
{
int dma_ok;
int port;
struct tioce *ce_mmr;
struct tioce_kernel *ce_kern;
- uint64_t ct_upper;
- uint64_t ct_lower;
+ u64 ct_upper;
+ u64 ct_lower;
dma_addr_t bus_addr;
ct_upper = ct_addr & ~0x3fffffffUL;
@@ -269,7 +269,7 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
- uint64_t tmp;
+ u64 tmp;
ce_kern->ce_port[port].dirmap_shadow = ct_upper;
writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
@@ -295,10 +295,10 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
* Given a TIOCE bus address, set the appropriate bit to indicate barrier
* attributes.
*/
-static uint64_t
-tioce_dma_barrier(uint64_t bus_addr, int on)
+static u64
+tioce_dma_barrier(u64 bus_addr, int on)
{
- uint64_t barrier_bit;
+ u64 barrier_bit;
/* barrier not supported in M40/M40S mode */
if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr))
@@ -351,7 +351,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
list_for_each_entry(map, &ce_kern->ce_dmamap_list,
ce_dmamap_list) {
- uint64_t last;
+ u64 last;
last = map->pci_start + map->nbytes - 1;
if (bus_addr >= map->pci_start && bus_addr <= last)
@@ -385,17 +385,17 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* This is the main wrapper for mapping host physical pages to CE PCI space.
* The mapping mode used is based on the device's dma_mask.
*/
-static uint64_t
-tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count,
+static u64
+tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
int barrier)
{
unsigned long flags;
- uint64_t ct_addr;
- uint64_t mapaddr = 0;
+ u64 ct_addr;
+ u64 mapaddr = 0;
struct tioce_kernel *ce_kern;
struct tioce_dmamap *map;
int port;
- uint64_t dma_mask;
+ u64 dma_mask;
dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;
@@ -425,7 +425,7 @@ tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count,
* address bits than this device can support.
*/
list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
- uint64_t last;
+ u64 last;
last = map->ct_start + map->nbytes - 1;
if (ct_addr >= map->ct_start &&
@@ -501,8 +501,8 @@ dma_map_done:
* Simply call tioce_do_dma_map() to create a map with the barrier bit clear
* in the address.
*/
-static uint64_t
-tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+static u64
+tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
{
return tioce_do_dma_map(pdev, paddr, byte_count, 0);
}
@@ -515,8 +515,8 @@ tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
*
* Simply call tioce_do_dma_map() to create a map with the barrier bit set
* in the address.
- */ static uint64_t
-tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+ */ static u64
+tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
{
return tioce_do_dma_map(pdev, paddr, byte_count, 1);
}
@@ -551,7 +551,7 @@ tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
tioce_kern_init(struct tioce_common *tioce_common)
{
int i;
- uint32_t tmp;
+ u32 tmp;
struct tioce *tioce_mmr;
struct tioce_kernel *tioce_kern;
@@ -563,7 +563,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
tioce_kern->ce_common = tioce_common;
spin_lock_init(&tioce_kern->ce_lock);
INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
- tioce_common->ce_kernel_private = (uint64_t) tioce_kern;
+ tioce_common->ce_kernel_private = (u64) tioce_kern;
/*
* Determine the secondary bus number of the port2 logical PPB.
@@ -575,7 +575,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment,
tioce_common->ce_pcibus.bs_persist_busnum,
PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
- tioce_kern->ce_port1_secondary = (uint8_t) tmp;
+ tioce_kern->ce_port1_secondary = (u8) tmp;
/*
* Set PMU pagesize to the largest size available, and zero out
@@ -615,7 +615,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
struct pcidev_info *pcidev_info;
struct tioce_common *ce_common;
struct tioce *ce_mmr;
- uint64_t force_int_val;
+ u64 force_int_val;
if (!sn_irq_info->irq_bridge)
return;
@@ -687,7 +687,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
struct tioce_common *ce_common;
struct tioce *ce_mmr;
int bit;
- uint64_t vector;
+ u64 vector;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (!pcidev_info)
@@ -699,7 +699,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
bit = sn_irq_info->irq_int_bit;
__sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
- vector = (uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
+ vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
vector |= sn_irq_info->irq_xtalkaddr;
writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
__sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
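
Two TIOCE details worth restating concretely: tioce_dma_d64() builds the bus address simply by setting bit 63 of the coretalk address, and tioce_dma()/tioce_dma_consistent() differ only in the barrier flag they hand to tioce_do_dma_map() (0 versus 1), which tioce_dma_barrier() later turns into the barrier bit for the chosen address mode. A worked example of the d64 encoding (the coretalk address is hypothetical):

    /* Worked example of the tioce_dma_d64() encoding: bus_addr = ct_addr | (1UL << 63). */
    static const unsigned long example_ct_addr  = 0x0000001234567000UL;
    static const unsigned long example_bus_addr =
            0x0000001234567000UL | (1UL << 63);     /* = 0x8000001234567000 */
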