author    Catalin Marinas <catalin.marinas@arm.com>    2006-04-10 21:32:46 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2006-04-10 21:32:46 +0100
commit    1356c1948da967bc1d4c663762bfe21dfcec4b2f (patch)
tree      7d7ddbaa5b9b69b53b9079bd7562eb3daf7682c4 /arch/arm/vfp
parent    bb54a335ae6d282a4f177c7b35cd149aa9b0b9be (diff)
[ARM] 3473/1: Use numbers 0-15 for the VFP double registers
Patch from Catalin Marinas

This patch changes the double register numbering from the even values 0-30 to 0-15, in preparation for future VFP extensions. It also fixes the VFP_REG_ZERO bug (value 16 actually represented the 8th double register under the original numbering). The original mcrr/mrrc instructions on CP10 were generating FMRRS/FMSRR instead of FMRRD/FMDRR; the patch switches to CP11 to get the correct instructions.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
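As background for the vfp_get_dd/vfp_get_dn/vfp_get_dm calls introduced below, here is a minimal sketch of how the new 0-15 numbering falls straight out of the instruction encoding. The macro bodies shown are an assumption for illustration (the real definitions live in arch/arm/vfp/vfp.h, which is not part of this diff); they read the 4-bit Dd/Dn/Dm fields of a VFP double-precision data-processing instruction:

/* Hypothetical illustration, not the patch itself: with registers numbered
 * 0-15, the double register index is just the raw 4-bit field, with none of
 * the doubling/halving the old even 0-30 scheme required. */
#define vfp_get_dd(inst)	(((inst) & 0x0000f000) >> 12)	/* Dd: bits [15:12] */
#define vfp_get_dn(inst)	(((inst) & 0x000f0000) >> 16)	/* Dn: bits [19:16] */
#define vfp_get_dm(inst)	((inst) & 0x0000000f)		/* Dm: bits [3:0]  */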
Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--    arch/arm/vfp/vfpdouble.c    20
-rw-r--r--    arch/arm/vfp/vfphw.S         6
-rw-r--r--    arch/arm/vfp/vfpsingle.c     2
3 files changed, 11 insertions(+), 17 deletions(-)
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index 2418d12e7fb..febd115dba2 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -1127,9 +1127,9 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
{
u32 op = inst & FOP_MASK;
u32 exceptions = 0;
- unsigned int dd = vfp_get_sd(inst);
- unsigned int dn = vfp_get_sn(inst);
- unsigned int dm = vfp_get_sm(inst);
+ unsigned int dd = vfp_get_dd(inst);
+ unsigned int dn = vfp_get_dn(inst);
+ unsigned int dm = vfp_get_dm(inst);
unsigned int vecitr, veclen, vecstride;
u32 (*fop)(int, int, s32, u32);
@@ -1146,7 +1146,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
(veclen >> FPSCR_LENGTH_BIT) + 1);
- fop = (op == FOP_EXT) ? fop_extfns[dn] : fop_fns[FOP_TO_IDX(op)];
+ fop = (op == FOP_EXT) ? fop_extfns[FEXT_TO_IDX(inst)] : fop_fns[FOP_TO_IDX(op)];
if (!fop)
goto invalid;
@@ -1154,17 +1154,13 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr)
u32 except;
if (op == FOP_EXT)
- pr_debug("VFP: itr%d (d%u.%u) = op[%u] (d%u.%u)\n",
+ pr_debug("VFP: itr%d (d%u) = op[%u] (d%u)\n",
vecitr >> FPSCR_LENGTH_BIT,
- dd >> 1, dd & 1, dn,
- dm >> 1, dm & 1);
+ dd, dn, dm);
else
- pr_debug("VFP: itr%d (d%u.%u) = (d%u.%u) op[%u] (d%u.%u)\n",
+ pr_debug("VFP: itr%d (d%u) = (d%u) op[%u] (d%u)\n",
vecitr >> FPSCR_LENGTH_BIT,
- dd >> 1, dd & 1,
- dn >> 1, dn & 1,
- FOP_TO_IDX(op),
- dm >> 1, dm & 1);
+ dd, dn, FOP_TO_IDX(op), dm);
except = fop(dd, dn, dm, fpscr);
pr_debug("VFP: itr%d: exceptions=%08x\n",
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index b7ed57e00cd..a3f65b47aea 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -189,11 +189,10 @@ vfp_put_float:
.globl vfp_get_double
vfp_get_double:
- mov r0, r0, lsr #1
add pc, pc, r0, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mrrc p10, 1, r0, r1, c\dr @ fmrrd r0, r1, d\dr
+ mrrc p11, 1, r0, r1, c\dr @ fmrrd r0, r1, d\dr
mov pc, lr
.endr
@@ -204,10 +203,9 @@ vfp_get_double:
.globl vfp_put_double
vfp_put_double:
- mov r0, r0, lsr #1
add pc, pc, r0, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mcrr p10, 1, r1, r2, c\dr @ fmrrd r1, r2, d\dr
+ mcrr p11, 1, r1, r2, c\dr @ fmdrr r1, r2, d\dr
mov pc, lr
.endr
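A short aside on the two vfphw.S hunks above: dropping the "mov r0, r0, lsr #1" works because r0 now carries the plain 0-15 register number, and each .irp entry is two 4-byte instructions (the mrrc/mcrr plus "mov pc, lr"), so "add pc, pc, r0, lsl #3" indexes the table directly; the "mov r0, r0" that follows the add is only a nop covering the ARM PC read-ahead. A hypothetical C sketch of that offset calculation, for illustration only:

/* Hypothetical helper, not kernel code: byte offset of a register's entry
 * in the vfp_get_double/vfp_put_double jump tables. */
static unsigned int vfp_double_entry_offset(unsigned int dr)
{
	return dr * 8;	/* dr << 3, matching "add pc, pc, r0, lsl #3" */
}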
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index df6e5e23bcc..4ac27f19393 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -1193,7 +1193,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
(veclen >> FPSCR_LENGTH_BIT) + 1);
- fop = (op == FOP_EXT) ? fop_extfns[sn] : fop_fns[FOP_TO_IDX(op)];
+ fop = (op == FOP_EXT) ? fop_extfns[FEXT_TO_IDX(inst)] : fop_fns[FOP_TO_IDX(op)];
if (!fop)
goto invalid;