author    bunnei <bunneidev@gmail.com>  2014-10-29 22:25:54 -0400
committer bunnei <bunneidev@gmail.com>  2014-11-02 01:04:41 -0500
commit    bc6989b0752b601762499fc1f3a0771980fd88f5 (patch)
tree      d07786caf1858fb41812d6b38b9865260e2178f4 /src/core/arm
parent    2ca12e7f3899a58e112ee8bb4e46d11ca61f7152 (diff)
ARM: Merge latest VFP fixes from 3dmoo team.
Diffstat (limited to 'src/core/arm')
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfp.cpp       |  476
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfp_helper.h  |    9
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfpdouble.cpp | 2055
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfpsingle.cpp | 1944
4 files changed, 2388 insertions, 2096 deletions
diff --git a/src/core/arm/skyeye_common/vfp/vfp.cpp b/src/core/arm/skyeye_common/vfp/vfp.cpp
index e4fa3c20..454f6009 100644
--- a/src/core/arm/skyeye_common/vfp/vfp.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfp.cpp
@@ -28,230 +28,270 @@
#include "core/arm/skyeye_common/armdefs.h"
#include "core/arm/skyeye_common/vfp/vfp.h"
+#define DEBUG DBG
+
//ARMul_State* persistent_state; /* function calls from the SoftFloat lib don't have access to ARMul_State. */
unsigned
VFPInit (ARMul_State *state)
{
- state->VFP[VFP_OFFSET(VFP_FPSID)] = VFP_FPSID_IMPLMEN<<24 | VFP_FPSID_SW<<23 | VFP_FPSID_SUBARCH<<16 |
- VFP_FPSID_PARTNUM<<8 | VFP_FPSID_VARIANT<<4 | VFP_FPSID_REVISION;
- state->VFP[VFP_OFFSET(VFP_FPEXC)] = 0;
- state->VFP[VFP_OFFSET(VFP_FPSCR)] = 0;
-
- //persistent_state = state;
- /* Reset only specify VFP_FPEXC_EN = '0' */
-
- return No_exp;
+ state->VFP[VFP_OFFSET(VFP_FPSID)] = VFP_FPSID_IMPLMEN<<24 | VFP_FPSID_SW<<23 | VFP_FPSID_SUBARCH<<16 |
+ VFP_FPSID_PARTNUM<<8 | VFP_FPSID_VARIANT<<4 | VFP_FPSID_REVISION;
+ state->VFP[VFP_OFFSET(VFP_FPEXC)] = 0;
+ state->VFP[VFP_OFFSET(VFP_FPSCR)] = 0;
+
+ //persistent_state = state;
+    /* Reset only specifies VFP_FPEXC_EN = '0' */
+
+ return 0;
}
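
The FPSID word built in VFPInit packs six identification fields into fixed bit positions. A minimal standalone sketch of the same packing, using hypothetical field values in place of the VFP_FPSID_* constants defined in armdefs.h:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the VFP_FPSID_* constants (the real values
// live in armdefs.h); the bit layout below is the one VFPInit uses.
static const uint32_t IMPLMEN  = 0x41; // [31:24] implementer
static const uint32_t SW       = 0;    // [23]    software model flag
static const uint32_t SUBARCH  = 0x2;  // [22:16] subarchitecture
static const uint32_t PARTNUM  = 0x20; // [15:8]  part number
static const uint32_t VARIANT  = 0x4;  // [7:4]   variant
static const uint32_t REVISION = 0x0;  // [3:0]   revision

int main() {
    uint32_t fpsid = IMPLMEN << 24 | SW << 23 | SUBARCH << 16 |
                     PARTNUM << 8 | VARIANT << 4 | REVISION;
    printf("FPSID = 0x%08x\n", fpsid); // 0x41022040 with these values
    return 0;
}
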
unsigned
-VFPMRC (ARMul_State * state, unsigned type, ARMword instr, ARMword * value)
+VFPMRC (ARMul_State * state, unsigned type, u32 instr, u32 * value)
{
- /* MRC<c> <coproc>,<opc1>,<Rt>,<CRn>,<CRm>{,<opc2>} */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int OPC_1 = BITS (21, 23);
- int Rt = BITS (12, 15);
- int CRn = BITS (16, 19);
- int CRm = BITS (0, 3);
- int OPC_2 = BITS (5, 7);
-
- /* TODO check access permission */
-
- /* CRn/opc1 CRm/opc2 */
-
- if (CoProc == 10 || CoProc == 11)
- {
- #define VFP_MRC_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_MRC_TRANS
- }
- DEBUG_LOG(ARM11, "Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, CRn %x, CRm %x, OPC_2 %x\n",
- instr, CoProc, OPC_1, Rt, CRn, CRm, OPC_2);
-
- return ARMul_CANT;
+ /* MRC<c> <coproc>,<opc1>,<Rt>,<CRn>,<CRm>{,<opc2>} */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int OPC_1 = BITS (21, 23);
+ int Rt = BITS (12, 15);
+ int CRn = BITS (16, 19);
+ int CRm = BITS (0, 3);
+ int OPC_2 = BITS (5, 7);
+
+ /* TODO check access permission */
+
+ /* CRn/opc1 CRm/opc2 */
+
+ if (CoProc == 10 || CoProc == 11) {
+#define VFP_MRC_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_MRC_TRANS
+ }
+ DEBUG("Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, CRn %x, CRm %x, OPC_2 %x\n",
+ instr, CoProc, OPC_1, Rt, CRn, CRm, OPC_2);
+
+ return ARMul_CANT;
}
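
The BITS(a, b) and BIT(n) macros used by these handlers extract contiguous fields from the implicit instr word. A self-contained functional equivalent, decoding a hypothetical MRC encoding (the instruction word and the printed field values are illustrative only):

#include <cstdint>
#include <cstdio>

// Functional equivalents of the emulator's BITS/BIT macros, written over
// an explicit instruction word (the originals read `instr` implicitly).
static uint32_t bits(uint32_t instr, int lo, int hi) {
    return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}
static uint32_t bit(uint32_t instr, int n) { return (instr >> n) & 1u; }

int main() {
    uint32_t instr = 0xEE110A10; // hypothetical coprocessor-10 MRC encoding
    printf("CoProc=%u OPC_1=%u Rt=%u CRn=%u CRm=%u OPC_2=%u L=%u\n",
           bits(instr, 8, 11), bits(instr, 21, 23), bits(instr, 12, 15),
           bits(instr, 16, 19), bits(instr, 0, 3), bits(instr, 5, 7),
           bit(instr, 20));
    return 0;
}
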
unsigned
-VFPMCR (ARMul_State * state, unsigned type, ARMword instr, ARMword value)
+VFPMCR (ARMul_State * state, unsigned type, u32 instr, u32 value)
{
- /* MCR<c> <coproc>,<opc1>,<Rt>,<CRn>,<CRm>{,<opc2>} */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int OPC_1 = BITS (21, 23);
- int Rt = BITS (12, 15);
- int CRn = BITS (16, 19);
- int CRm = BITS (0, 3);
- int OPC_2 = BITS (5, 7);
-
- /* TODO check access permission */
-
- /* CRn/opc1 CRm/opc2 */
- if (CoProc == 10 || CoProc == 11)
- {
- #define VFP_MCR_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_MCR_TRANS
- }
- DEBUG_LOG(ARM11, "Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, CRn %x, CRm %x, OPC_2 %x\n",
- instr, CoProc, OPC_1, Rt, CRn, CRm, OPC_2);
-
- return ARMul_CANT;
+ /* MCR<c> <coproc>,<opc1>,<Rt>,<CRn>,<CRm>{,<opc2>} */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int OPC_1 = BITS (21, 23);
+ int Rt = BITS (12, 15);
+ int CRn = BITS (16, 19);
+ int CRm = BITS (0, 3);
+ int OPC_2 = BITS (5, 7);
+
+ /* TODO check access permission */
+
+ /* CRn/opc1 CRm/opc2 */
+ if (CoProc == 10 || CoProc == 11) {
+#define VFP_MCR_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_MCR_TRANS
+ }
+ DEBUG("Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, CRn %x, CRm %x, OPC_2 %x\n",
+ instr, CoProc, OPC_1, Rt, CRn, CRm, OPC_2);
+
+ return ARMul_CANT;
}
unsigned
-VFPMRRC (ARMul_State * state, unsigned type, ARMword instr, ARMword * value1, ARMword * value2)
+VFPMRRC (ARMul_State * state, unsigned type, u32 instr, u32 * value1, u32 * value2)
{
- /* MCRR<c> <coproc>,<opc1>,<Rt>,<Rt2>,<CRm> */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int OPC_1 = BITS (4, 7);
- int Rt = BITS (12, 15);
- int Rt2 = BITS (16, 19);
- int CRm = BITS (0, 3);
-
- if (CoProc == 10 || CoProc == 11)
- {
- #define VFP_MRRC_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_MRRC_TRANS
- }
- DEBUG_LOG(ARM11, "Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, Rt2 %x, CRm %x\n",
- instr, CoProc, OPC_1, Rt, Rt2, CRm);
-
- return ARMul_CANT;
+ /* MCRR<c> <coproc>,<opc1>,<Rt>,<Rt2>,<CRm> */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int OPC_1 = BITS (4, 7);
+ int Rt = BITS (12, 15);
+ int Rt2 = BITS (16, 19);
+ int CRm = BITS (0, 3);
+
+ if (CoProc == 10 || CoProc == 11) {
+#define VFP_MRRC_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_MRRC_TRANS
+ }
+ DEBUG("Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, Rt2 %x, CRm %x\n",
+ instr, CoProc, OPC_1, Rt, Rt2, CRm);
+
+ return ARMul_CANT;
}
unsigned
-VFPMCRR (ARMul_State * state, unsigned type, ARMword instr, ARMword value1, ARMword value2)
+VFPMCRR (ARMul_State * state, unsigned type, u32 instr, u32 value1, u32 value2)
{
- /* MCRR<c> <coproc>,<opc1>,<Rt>,<Rt2>,<CRm> */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int OPC_1 = BITS (4, 7);
- int Rt = BITS (12, 15);
- int Rt2 = BITS (16, 19);
- int CRm = BITS (0, 3);
-
- /* TODO check access permission */
-
- /* CRn/opc1 CRm/opc2 */
-
- if (CoProc == 11 || CoProc == 10)
- {
- #define VFP_MCRR_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_MCRR_TRANS
- }
- DEBUG_LOG(ARM11, "Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, Rt2 %x, CRm %x\n",
- instr, CoProc, OPC_1, Rt, Rt2, CRm);
-
- return ARMul_CANT;
+ /* MCRR<c> <coproc>,<opc1>,<Rt>,<Rt2>,<CRm> */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int OPC_1 = BITS (4, 7);
+ int Rt = BITS (12, 15);
+ int Rt2 = BITS (16, 19);
+ int CRm = BITS (0, 3);
+
+ /* TODO check access permission */
+
+ /* CRn/opc1 CRm/opc2 */
+
+ if (CoProc == 11 || CoProc == 10) {
+#define VFP_MCRR_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_MCRR_TRANS
+ }
+ DEBUG("Can't identify %x, CoProc %x, OPC_1 %x, Rt %x, Rt2 %x, CRm %x\n",
+ instr, CoProc, OPC_1, Rt, Rt2, CRm);
+
+ return ARMul_CANT;
}
unsigned
-VFPSTC (ARMul_State * state, unsigned type, ARMword instr, ARMword * value)
+VFPSTC (ARMul_State * state, unsigned type, u32 instr, u32 * value)
{
- /* STC{L}<c> <coproc>,<CRd>,[<Rn>],<option> */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int CRd = BITS (12, 15);
- int Rn = BITS (16, 19);
- int imm8 = BITS (0, 7);
- int P = BIT(24);
- int U = BIT(23);
- int D = BIT(22);
- int W = BIT(21);
-
- /* TODO check access permission */
-
- /* VSTM */
- if ( (P|U|D|W) == 0 )
- {
- DEBUG_LOG(ARM11, "In %s, UNDEFINED\n", __FUNCTION__); exit(-1);
- }
- if (CoProc == 10 || CoProc == 11)
- {
- #if 1
- if (P == 0 && U == 0 && W == 0)
- {
- DEBUG_LOG(ARM11, "VSTM Related encodings\n"); exit(-1);
- }
- if (P == U && W == 1)
- {
- DEBUG_LOG(ARM11, "UNDEFINED\n"); exit(-1);
- }
- #endif
-
- #define VFP_STC_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_STC_TRANS
- }
- DEBUG_LOG(ARM11, "Can't identify %x, CoProc %x, CRd %x, Rn %x, imm8 %x, P %x, U %x, D %x, W %x\n",
- instr, CoProc, CRd, Rn, imm8, P, U, D, W);
-
- return ARMul_CANT;
+ /* STC{L}<c> <coproc>,<CRd>,[<Rn>],<option> */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int CRd = BITS (12, 15);
+ int Rn = BITS (16, 19);
+ int imm8 = BITS (0, 7);
+ int P = BIT(24);
+ int U = BIT(23);
+ int D = BIT(22);
+ int W = BIT(21);
+
+ /* TODO check access permission */
+
+ /* VSTM */
+ if ( (P|U|D|W) == 0 ) {
+ DEBUG("In %s, UNDEFINED\n", __FUNCTION__);
+ exit(-1);
+ }
+ if (CoProc == 10 || CoProc == 11) {
+#if 1
+ if (P == 0 && U == 0 && W == 0) {
+ DEBUG("VSTM Related encodings\n");
+ exit(-1);
+ }
+ if (P == U && W == 1) {
+ DEBUG("UNDEFINED\n");
+ exit(-1);
+ }
+#endif
+
+#define VFP_STC_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_STC_TRANS
+ }
+ DEBUG("Can't identify %x, CoProc %x, CRd %x, Rn %x, imm8 %x, P %x, U %x, D %x, W %x\n",
+ instr, CoProc, CRd, Rn, imm8, P, U, D, W);
+
+ return ARMul_CANT;
}
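
The P, U, D, and W bits select the store's addressing mode, and the handler rejects the combinations the architecture leaves undefined. A sketch of the same checks over a hypothetical encoding word:

#include <cstdint>
#include <cstdio>

static uint32_t bit(uint32_t instr, int n) { return (instr >> n) & 1u; }

int main() {
    uint32_t instr = 0xED810A00; // hypothetical VSTR-style encoding
    int P = bit(instr, 24), U = bit(instr, 23), D = bit(instr, 22), W = bit(instr, 21);
    if ((P | U | D | W) == 0)
        puts("UNDEFINED: no addressing mode selected");
    else if (P == 0 && U == 0 && W == 0)
        puts("VSTM related encodings");
    else if (P == U && W == 1)
        puts("UNDEFINED: P == U with writeback");
    else
        printf("store, P=%d U=%d D=%d W=%d\n", P, U, D, W);
    return 0;
}
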
unsigned
-VFPLDC (ARMul_State * state, unsigned type, ARMword instr, ARMword value)
+VFPLDC (ARMul_State * state, unsigned type, u32 instr, u32 value)
{
- /* LDC{L}<c> <coproc>,<CRd>,[<Rn>] */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int CRd = BITS (12, 15);
- int Rn = BITS (16, 19);
- int imm8 = BITS (0, 7);
- int P = BIT(24);
- int U = BIT(23);
- int D = BIT(22);
- int W = BIT(21);
-
- /* TODO check access permission */
-
- if ( (P|U|D|W) == 0 )
- {
- DEBUG_LOG(ARM11, "In %s, UNDEFINED\n", __FUNCTION__); exit(-1);
- }
- if (CoProc == 10 || CoProc == 11)
- {
- #define VFP_LDC_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_LDC_TRANS
- }
- DEBUG_LOG(ARM11, "Can't identify %x, CoProc %x, CRd %x, Rn %x, imm8 %x, P %x, U %x, D %x, W %x\n",
- instr, CoProc, CRd, Rn, imm8, P, U, D, W);
-
- return ARMul_CANT;
+ /* LDC{L}<c> <coproc>,<CRd>,[<Rn>] */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int CRd = BITS (12, 15);
+ int Rn = BITS (16, 19);
+ int imm8 = BITS (0, 7);
+ int P = BIT(24);
+ int U = BIT(23);
+ int D = BIT(22);
+ int W = BIT(21);
+
+ /* TODO check access permission */
+
+ if ( (P|U|D|W) == 0 ) {
+ DEBUG("In %s, UNDEFINED\n", __FUNCTION__);
+ exit(-1);
+ }
+ if (CoProc == 10 || CoProc == 11) {
+#define VFP_LDC_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_LDC_TRANS
+ }
+ DEBUG("Can't identify %x, CoProc %x, CRd %x, Rn %x, imm8 %x, P %x, U %x, D %x, W %x\n",
+ instr, CoProc, CRd, Rn, imm8, P, U, D, W);
+
+ return ARMul_CANT;
}
unsigned
-VFPCDP (ARMul_State * state, unsigned type, ARMword instr)
+VFPCDP (ARMul_State * state, unsigned type, u32 instr)
{
- /* CDP<c> <coproc>,<opc1>,<CRd>,<CRn>,<CRm>,<opc2> */
- int CoProc = BITS (8, 11); /* 10 or 11 */
- int OPC_1 = BITS (20, 23);
- int CRd = BITS (12, 15);
- int CRn = BITS (16, 19);
- int CRm = BITS (0, 3);
- int OPC_2 = BITS (5, 7);
-
- /* TODO check access permission */
-
- /* CRn/opc1 CRm/opc2 */
-
- if (CoProc == 10 || CoProc == 11)
- {
- #define VFP_CDP_TRANS
- #include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
- #undef VFP_CDP_TRANS
-
- int exceptions = 0;
- if (CoProc == 10)
- exceptions = vfp_single_cpdo(state, instr, state->VFP[VFP_OFFSET(VFP_FPSCR)]);
- else
- exceptions = vfp_double_cpdo(state, instr, state->VFP[VFP_OFFSET(VFP_FPSCR)]);
-
- vfp_raise_exceptions(state, exceptions, instr, state->VFP[VFP_OFFSET(VFP_FPSCR)]);
-
- return ARMul_DONE;
- }
- DEBUG_LOG(ARM11, "Can't identify %x\n", instr);
- return ARMul_CANT;
+ /* CDP<c> <coproc>,<opc1>,<CRd>,<CRn>,<CRm>,<opc2> */
+ int CoProc = BITS (8, 11); /* 10 or 11 */
+ int OPC_1 = BITS (20, 23);
+ int CRd = BITS (12, 15);
+ int CRn = BITS (16, 19);
+ int CRm = BITS (0, 3);
+ int OPC_2 = BITS (5, 7);
+
+ //ichfly
+ /*if ((instr & 0x0FBF0FD0) == 0x0EB70AC0) //vcvt.f64.f32 d8, s16 (s is bit 0-3 and LSB bit 22) (d is bit 12 - 15 MSB is Bit 6)
+ {
+ struct vfp_double vdd;
+ struct vfp_single vsd;
+ int dn = BITS(12, 15) + (BIT(22) << 4);
+ int sd = (BITS(0, 3) << 1) + BIT(5);
+ s32 n = vfp_get_float(state, sd);
+ vfp_single_unpack(&vsd, n);
+ if (vsd.exponent & 0x80)
+ {
+ vdd.exponent = (vsd.exponent&~0x80) | 0x400;
+ }
+ else
+ {
+ vdd.exponent = vsd.exponent | 0x380;
+ }
+ vdd.sign = vsd.sign;
+    vdd.significand = (u64)(vsd.significand & ~0xC0000000) << 32; // I have no idea why, but the two upper bits are not from the significand
+ vfp_put_double(state, vfp_double_pack(&vdd), dn);
+ return ARMul_DONE;
+ }
+ if ((instr & 0x0FBF0FD0) == 0x0EB70BC0) //vcvt.f32.f64 s15, d6
+ {
+ struct vfp_double vdd;
+ struct vfp_single vsd;
+ int sd = BITS(0, 3) + (BIT(5) << 4);
+ int dn = (BITS(12, 15) << 1) + BIT(22);
+ vfp_double_unpack(&vdd, vfp_get_double(state, sd));
+    if (vdd.exponent & 0x400) // TODO: the exponent may be too low or too high for this conversion
+ {
+ vsd.exponent = (vdd.exponent) | 0x80;
+ }
+ else
+ {
+ vsd.exponent = vdd.exponent & ~0x80;
+ }
+ vsd.exponent &= 0xFF;
+ // vsd.exponent = vdd.exponent >> 3;
+ vsd.sign = vdd.sign;
+ vsd.significand = ((u64)(vdd.significand ) >> 32)& ~0xC0000000;
+ vfp_put_float(state, vfp_single_pack(&vsd), dn);
+ return ARMul_DONE;
+ }*/
+
+ /* TODO check access permission */
+
+ /* CRn/opc1 CRm/opc2 */
+
+ if (CoProc == 10 || CoProc == 11) {
+#define VFP_CDP_TRANS
+#include "core/arm/skyeye_common/vfp/vfpinstr.cpp"
+#undef VFP_CDP_TRANS
+
+ int exceptions = 0;
+ if (CoProc == 10)
+ exceptions = vfp_single_cpdo(state, instr, state->VFP[VFP_OFFSET(VFP_FPSCR)]);
+ else
+ exceptions = vfp_double_cpdo(state, instr, state->VFP[VFP_OFFSET(VFP_FPSCR)]);
+
+ vfp_raise_exceptions(state, exceptions, instr, state->VFP[VFP_OFFSET(VFP_FPSCR)]);
+
+ return ARMul_DONE;
+ }
+ DEBUG("Can't identify %x\n", instr);
+ return ARMul_CANT;
}
@@ -301,29 +341,29 @@ VFPCDP (ARMul_State * state, unsigned type, ARMword instr)
/* Miscellaneous functions */
int32_t vfp_get_float(arm_core_t* state, unsigned int reg)
{
- DBG("VFP get float: s%d=[%08x]\n", reg, state->ExtReg[reg]);
- return state->ExtReg[reg];
+ DEBUG("VFP get float: s%d=[%08x]\n", reg, state->ExtReg[reg]);
+ return state->ExtReg[reg];
}
void vfp_put_float(arm_core_t* state, int32_t val, unsigned int reg)
{
- DBG("VFP put float: s%d <= [%08x]\n", reg, val);
- state->ExtReg[reg] = val;
+ DEBUG("VFP put float: s%d <= [%08x]\n", reg, val);
+ state->ExtReg[reg] = val;
}
uint64_t vfp_get_double(arm_core_t* state, unsigned int reg)
{
- uint64_t result;
- result = ((uint64_t) state->ExtReg[reg*2+1])<<32 | state->ExtReg[reg*2];
- DBG("VFP get double: s[%d-%d]=[%016llx]\n", reg*2+1, reg*2, result);
- return result;
+ uint64_t result;
+ result = ((uint64_t) state->ExtReg[reg*2+1])<<32 | state->ExtReg[reg*2];
+ DEBUG("VFP get double: s[%d-%d]=[%016llx]\n", reg*2+1, reg*2, result);
+ return result;
}
void vfp_put_double(arm_core_t* state, uint64_t val, unsigned int reg)
{
- DBG("VFP put double: s[%d-%d] <= [%08x-%08x]\n", reg*2+1, reg*2, (uint32_t) (val>>32), (uint32_t) (val & 0xffffffff));
- state->ExtReg[reg*2] = (uint32_t) (val & 0xffffffff);
- state->ExtReg[reg*2+1] = (uint32_t) (val>>32);
+ DEBUG("VFP put double: s[%d-%d] <= [%08x-%08x]\n", reg*2+1, reg*2, (uint32_t) (val>>32), (uint32_t) (val & 0xffffffff));
+ state->ExtReg[reg*2] = (uint32_t) (val & 0xffffffff);
+ state->ExtReg[reg*2+1] = (uint32_t) (val>>32);
}
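
These accessors map double register d<reg> onto two consecutive 32-bit ExtReg words, low word first. A standalone round-trip sketch of that packing (the ExtReg array size here is an assumption for illustration):

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t ExtReg[64] = {};                 // size assumed for the sketch
    unsigned reg = 3;
    uint64_t val = 0x4000000000000000ULL;     // bit pattern of 2.0

    // put: same split as vfp_put_double
    ExtReg[reg * 2]     = (uint32_t)(val & 0xffffffff);
    ExtReg[reg * 2 + 1] = (uint32_t)(val >> 32);

    // get: same recombination as vfp_get_double
    uint64_t result = ((uint64_t)ExtReg[reg * 2 + 1] << 32) | ExtReg[reg * 2];
    printf("d%u = %016llx\n", reg, (unsigned long long)result);
    return 0;
}
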
@@ -333,25 +373,25 @@ void vfp_put_double(arm_core_t* state, uint64_t val, unsigned int reg)
*/
void vfp_raise_exceptions(ARMul_State* state, u32 exceptions, u32 inst, u32 fpscr)
{
- int si_code = 0;
+ int si_code = 0;
- vfpdebug("VFP: raising exceptions %08x\n", exceptions);
+ vfpdebug("VFP: raising exceptions %08x\n", exceptions);
- if (exceptions == VFP_EXCEPTION_ERROR) {
- DEBUG_LOG(ARM11, "unhandled bounce %x\n", inst);
- exit(-1);
- return;
- }
+ if (exceptions == VFP_EXCEPTION_ERROR) {
+ DEBUG("unhandled bounce %x\n", inst);
+ exit(-1);
+ return;
+ }
- /*
- * If any of the status flags are set, update the FPSCR.
- * Comparison instructions always return at least one of
- * these flags set.
- */
- if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
- fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
+ /*
+ * If any of the status flags are set, update the FPSCR.
+ * Comparison instructions always return at least one of
+ * these flags set.
+ */
+ if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
+ fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
- fpscr |= exceptions;
+ fpscr |= exceptions;
- state->VFP[VFP_OFFSET(VFP_FPSCR)] = fpscr;
+ state->VFP[VFP_OFFSET(VFP_FPSCR)] = fpscr;
}
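
The FPSCR update has two distinct behaviours: a comparison result replaces the old NZCV flags outright, while exception bits are sticky and only accumulate. A minimal sketch, assuming the architectural FPSCR layout (NZCV in bits 31-28, cumulative exception flags such as IXC in the low bits):

#include <cstdint>
#include <cstdio>

static const uint32_t FPSCR_N = 1u << 31, FPSCR_Z = 1u << 30,
                      FPSCR_C = 1u << 29, FPSCR_V = 1u << 28;
static const uint32_t FPSCR_IXC = 1u << 4; // inexact-cumulative, as an example

int main() {
    uint32_t fpscr = FPSCR_N | FPSCR_IXC;    // stale flags from earlier ops
    uint32_t exceptions = FPSCR_Z | FPSCR_C; // e.g. a compare found equality

    // Same logic as vfp_raise_exceptions: a new NZCV result replaces the
    // old one, while sticky exception bits are only ever OR-ed in.
    if (exceptions & (FPSCR_N | FPSCR_Z | FPSCR_C | FPSCR_V))
        fpscr &= ~(FPSCR_N | FPSCR_Z | FPSCR_C | FPSCR_V);
    fpscr |= exceptions;

    printf("fpscr = %08x\n", fpscr); // Z and C set, N cleared, IXC kept
    return 0;
}
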
diff --git a/src/core/arm/skyeye_common/vfp/vfp_helper.h b/src/core/arm/skyeye_common/vfp/vfp_helper.h
index 5076e59f..f0896fc8 100644
--- a/src/core/arm/skyeye_common/vfp/vfp_helper.h
+++ b/src/core/arm/skyeye_common/vfp/vfp_helper.h
@@ -44,7 +44,7 @@
#define pr_info //printf
#define pr_debug //printf
-static u32 vfp_fls(int x);
+static u32 fls(int x);
#define do_div(n, base) {n/=base;}
/* From vfpinstr.h */
@@ -502,7 +502,7 @@ struct op {
u32 flags;
};
-static u32 vfp_fls(int x)
+static u32 fls(int x)
{
int r = 32;
@@ -532,4 +532,9 @@ static u32 vfp_fls(int x)
}
+u32 vfp_double_normaliseroundintern(ARMul_State* state, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func);
+u32 vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn, struct vfp_double *vdm, u32 fpscr);
+u32 vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn, struct vfp_double *vdm, u32 fpscr);
+u32 vfp_double_fcvtsinterncutting(ARMul_State* state, int sd, struct vfp_double* dm, u32 fpscr);
+
#endif
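
The fls() declared above ("find last set") returns the 1-based index of the most significant set bit, or 0 for a zero input; the normalisation code uses it to count leading zeros. A standalone sketch with the same contract (the binary-fold implementation is an assumption):

#include <cstdint>
#include <cstdio>

static uint32_t fls_sketch(uint32_t x) {
    int r = 32;
    if (!x) return 0;
    if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
    if (!(x & 0xff000000u)) { x <<= 8;  r -= 8; }
    if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4; }
    if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2; }
    if (!(x & 0x80000000u)) {           r -= 1; }
    return r;
}

int main() {
    printf("%u %u %u\n", fls_sketch(0), fls_sketch(1), fls_sketch(0x80000000u));
    // prints: 0 1 32
    return 0;
}
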
diff --git a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
index 13411ad8..765c1f6b 100644
--- a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
@@ -56,163 +56,291 @@
#include "core/arm/skyeye_common/vfp/asm_vfp.h"
static struct vfp_double vfp_double_default_qnan = {
- //.exponent = 2047,
- //.sign = 0,
- //.significand = VFP_DOUBLE_SIGNIFICAND_QNAN,
+ 2047,
+ 0,
+ VFP_DOUBLE_SIGNIFICAND_QNAN,
};
static void vfp_double_dump(const char *str, struct vfp_double *d)
{
- pr_debug("VFP: %s: sign=%d exponent=%d significand=%016llx\n",
- str, d->sign != 0, d->exponent, d->significand);
+ pr_debug("VFP: %s: sign=%d exponent=%d significand=%016llx\n",
+ str, d->sign != 0, d->exponent, d->significand);
}
static void vfp_double_normalise_denormal(struct vfp_double *vd)
{
- int bits = 31 - vfp_fls(vd->significand >> 32);
- if (bits == 31)
- bits = 63 - vfp_fls(vd->significand);
+ int bits = 31 - fls((ARMword)(vd->significand >> 32));
+ if (bits == 31)
+ bits = 63 - fls((ARMword)vd->significand);
- vfp_double_dump("normalise_denormal: in", vd);
+ vfp_double_dump("normalise_denormal: in", vd);
- if (bits) {
- vd->exponent -= bits - 1;
- vd->significand <<= bits;
- }
+ if (bits) {
+ vd->exponent -= bits - 1;
+ vd->significand <<= bits;
+ }
- vfp_double_dump("normalise_denormal: out", vd);
+ vfp_double_dump("normalise_denormal: out", vd);
}
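
vfp_double_normalise_denormal shifts a denormal's significand left until its leading one reaches the top of the 64-bit working value, compensating in the exponent. A worked sketch of that shift computation, with a simple local bit-scan standing in for fls:

#include <cstdint>
#include <cstdio>

// 1-based index of the highest set bit, 0 when x == 0.
static uint32_t fls32(uint32_t x) {
    uint32_t r = 0;
    while (x) { x >>= 1; ++r; }
    return r;
}

int main() {
    // A denormal in the unpacked form used by struct vfp_double.
    int exponent = 0;
    uint64_t significand = 0x0000000000123456ULL;

    // Same shift computation as vfp_double_normalise_denormal.
    int bits = 31 - fls32((uint32_t)(significand >> 32));
    if (bits == 31)
        bits = 63 - fls32((uint32_t)significand);

    if (bits) {
        exponent -= bits - 1;
        significand <<= bits;
    }
    printf("exponent=%d significand=%016llx\n",
           exponent, (unsigned long long)significand);
    return 0;
}
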
-u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
+u32 vfp_double_normaliseroundintern(ARMul_State* state, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
{
- u64 significand, incr;
- int exponent, shift, underflow;
- u32 rmode;
-
- vfp_double_dump("pack: in", vd);
-
- /*
- * Infinities and NaNs are a special case.
- */
- if (vd->exponent == 2047 && (vd->significand == 0 || exceptions))
- goto pack;
-
- /*
- * Special-case zero.
- */
- if (vd->significand == 0) {
- vd->exponent = 0;
- goto pack;
- }
-
- exponent = vd->exponent;
- significand = vd->significand;
-
- shift = 32 - vfp_fls(significand >> 32);
- if (shift == 32)
- shift = 64 - vfp_fls(significand);
- if (shift) {
- exponent -= shift;
- significand <<= shift;
- }
+ u64 significand, incr;
+ int exponent, shift, underflow;
+ u32 rmode;
+
+ vfp_double_dump("pack: in", vd);
+
+ /*
+ * Infinities and NaNs are a special case.
+ */
+ if (vd->exponent == 2047 && (vd->significand == 0 || exceptions))
+ goto pack;
+
+ /*
+ * Special-case zero.
+ */
+ if (vd->significand == 0) {
+ vd->exponent = 0;
+ goto pack;
+ }
+
+ exponent = vd->exponent;
+ significand = vd->significand;
+
+ shift = 32 - fls((ARMword)(significand >> 32));
+ if (shift == 32)
+ shift = 64 - fls((ARMword)significand);
+ if (shift) {
+ exponent -= shift;
+ significand <<= shift;
+ }
#if 1
- vd->exponent = exponent;
- vd->significand = significand;
- vfp_double_dump("pack: normalised", vd);
+ vd->exponent = exponent;
+ vd->significand = significand;
+ vfp_double_dump("pack: normalised", vd);
#endif
- /*
- * Tiny number?
- */
- underflow = exponent < 0;
- if (underflow) {
- significand = vfp_shiftright64jamming(significand, -exponent);
- exponent = 0;
+ /*
+ * Tiny number?
+ */
+ underflow = exponent < 0;
+ if (underflow) {
+ significand = vfp_shiftright64jamming(significand, -exponent);
+ exponent = 0;
#if 1
- vd->exponent = exponent;
- vd->significand = significand;
- vfp_double_dump("pack: tiny number", vd);
+ vd->exponent = exponent;
+ vd->significand = significand;
+ vfp_double_dump("pack: tiny number", vd);
#endif
- if (!(significand & ((1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1)))
- underflow = 0;
- }
-
- /*
- * Select rounding increment.
- */
- incr = 0;
- rmode = fpscr & FPSCR_RMODE_MASK;
-
- if (rmode == FPSCR_ROUND_NEAREST) {
- incr = 1ULL << VFP_DOUBLE_LOW_BITS;
- if ((significand & (1ULL << (VFP_DOUBLE_LOW_BITS + 1))) == 0)
- incr -= 1;
- } else if (rmode == FPSCR_ROUND_TOZERO) {
- incr = 0;
- } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0))
- incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1;
-
- pr_debug("VFP: rounding increment = 0x%08llx\n", incr);
-
- /*
- * Is our rounding going to overflow?
- */
- if ((significand + incr) < significand) {
- exponent += 1;
- significand = (significand >> 1) | (significand & 1);
- incr >>= 1;
+ if (!(significand & ((1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1)))
+ underflow = 0;
+ }
+
+ /*
+ * Select rounding increment.
+ */
+ incr = 0;
+ rmode = fpscr & FPSCR_RMODE_MASK;
+
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 1ULL << VFP_DOUBLE_LOW_BITS;
+ if ((significand & (1ULL << (VFP_DOUBLE_LOW_BITS + 1))) == 0)
+ incr -= 1;
+ }
+ else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ }
+ else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0))
+ incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1;
+
+ pr_debug("VFP: rounding increment = 0x%08llx\n", incr);
+
+ /*
+ * Is our rounding going to overflow?
+ */
+ if ((significand + incr) < significand) {
+ exponent += 1;
+ significand = (significand >> 1) | (significand & 1);
+ incr >>= 1;
#if 1
- vd->exponent = exponent;
- vd->significand = significand;
- vfp_double_dump("pack: overflow", vd);
+ vd->exponent = exponent;
+ vd->significand = significand;
+ vfp_double_dump("pack: overflow", vd);
#endif
- }
-
- /*
- * If any of the low bits (which will be shifted out of the
- * number) are non-zero, the result is inexact.
- */
- if (significand & ((1 << (VFP_DOUBLE_LOW_BITS + 1)) - 1))
- exceptions |= FPSCR_IXC;
-
- /*
- * Do our rounding.
- */
- significand += incr;
-
- /*
- * Infinity?
- */
- if (exponent >= 2046) {
- exceptions |= FPSCR_OFC | FPSCR_IXC;
- if (incr == 0) {
- vd->exponent = 2045;
- vd->significand = 0x7fffffffffffffffULL;
- } else {
- vd->exponent = 2047; /* infinity */
- vd->significand = 0;
- }
- } else {
- if (significand >> (VFP_DOUBLE_LOW_BITS + 1) == 0)
- exponent = 0;
- if (exponent || significand > 0x8000000000000000ULL)
- underflow = 0;
- if (underflow)
- exceptions |= FPSCR_UFC;
- vd->exponent = exponent;
- vd->significand = significand >> 1;
- }
-
+ }
+
+ /*
+ * If any of the low bits (which will be shifted out of the
+ * number) are non-zero, the result is inexact.
+ */
+ if (significand & ((1 << (VFP_DOUBLE_LOW_BITS + 1)) - 1))
+ exceptions |= FPSCR_IXC;
+
+ /*
+ * Do our rounding.
+ */
+ significand += incr;
+
+ /*
+ * Infinity?
+ */
+ if (exponent >= 2046) {
+ exceptions |= FPSCR_OFC | FPSCR_IXC;
+ if (incr == 0) {
+ vd->exponent = 2045;
+ vd->significand = 0x7fffffffffffffffULL;
+ }
+ else {
+ vd->exponent = 2047; /* infinity */
+ vd->significand = 0;
+ }
+ }
+ else {
+ if (significand >> (VFP_DOUBLE_LOW_BITS + 1) == 0)
+ exponent = 0;
+ if (exponent || significand > 0x8000000000000000ULL)
+ underflow = 0;
+ if (underflow)
+ exceptions |= FPSCR_UFC;
+ vd->exponent = exponent;
+ vd->significand = significand >> 1;
+ }
pack:
- vfp_double_dump("pack: final", vd);
- {
- s64 d = vfp_double_pack(vd);
- pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func,
- dd, d, exceptions);
- vfp_put_double(state, d, dd);
- }
- return exceptions;
+ return 0;
+}
+
+u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
+{
+ u64 significand, incr;
+ int exponent, shift, underflow;
+ u32 rmode;
+
+ vfp_double_dump("pack: in", vd);
+
+ /*
+ * Infinities and NaNs are a special case.
+ */
+ if (vd->exponent == 2047 && (vd->significand == 0 || exceptions))
+ goto pack;
+
+ /*
+ * Special-case zero.
+ */
+ if (vd->significand == 0) {
+ vd->exponent = 0;
+ goto pack;
+ }
+
+ exponent = vd->exponent;
+ significand = vd->significand;
+
+ shift = 32 - fls((ARMword)(significand >> 32));
+ if (shift == 32)
+ shift = 64 - fls((ARMword)significand);
+ if (shift) {
+ exponent -= shift;
+ significand <<= shift;
+ }
+
+#if 1
+ vd->exponent = exponent;
+ vd->significand = significand;
+ vfp_double_dump("pack: normalised", vd);
+#endif
+
+ /*
+ * Tiny number?
+ */
+ underflow = exponent < 0;
+ if (underflow) {
+ significand = vfp_shiftright64jamming(significand, -exponent);
+ exponent = 0;
+#if 1
+ vd->exponent = exponent;
+ vd->significand = significand;
+ vfp_double_dump("pack: tiny number", vd);
+#endif
+ if (!(significand & ((1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1)))
+ underflow = 0;
+ }
+
+ /*
+ * Select rounding increment.
+ */
+ incr = 0;
+ rmode = fpscr & FPSCR_RMODE_MASK;
+
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 1ULL << VFP_DOUBLE_LOW_BITS;
+ if ((significand & (1ULL << (VFP_DOUBLE_LOW_BITS + 1))) == 0)
+ incr -= 1;
+ } else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0))
+ incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1;
+
+ pr_debug("VFP: rounding increment = 0x%08llx\n", incr);
+
+ /*
+ * Is our rounding going to overflow?
+ */
+ if ((significand + incr) < significand) {
+ exponent += 1;
+ significand = (significand >> 1) | (significand & 1);
+ incr >>= 1;
+#if 1
+ vd->exponent = exponent;
+ vd->significand = significand;
+ vfp_double_dump("pack: overflow", vd);
+#endif
+ }
+
+ /*
+ * If any of the low bits (which will be shifted out of the
+ * number) are non-zero, the result is inexact.
+ */
+ if (significand & ((1 << (VFP_DOUBLE_LOW_BITS + 1)) - 1))
+ exceptions |= FPSCR_IXC;
+
+ /*
+ * Do our rounding.
+ */
+ significand += incr;
+
+ /*
+ * Infinity?
+ */
+ if (exponent >= 2046) {
+ exceptions |= FPSCR_OFC | FPSCR_IXC;
+ if (incr == 0) {
+ vd->exponent = 2045;
+ vd->significand = 0x7fffffffffffffffULL;
+ } else {
+ vd->exponent = 2047; /* infinity */
+ vd->significand = 0;
+ }
+ } else {
+ if (significand >> (VFP_DOUBLE_LOW_BITS + 1) == 0)
+ exponent = 0;
+ if (exponent || significand > 0x8000000000000000ULL)
+ underflow = 0;
+ if (underflow)
+ exceptions |= FPSCR_UFC;
+ vd->exponent = exponent;
+ vd->significand = significand >> 1;
+ }
+
+pack:
+ vfp_double_dump("pack: final", vd);
+ {
+ s64 d = vfp_double_pack(vd);
+ pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func,
+ dd, d, exceptions);
+ vfp_put_double(state, d, dd);
+ }
+ return exceptions;
}
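
The rounding step adds an increment selected by the FPSCR rounding mode: half an LSB for round-to-nearest (reduced by one when the result's LSB is clear, giving ties-to-even), zero for round-toward-zero, and just under a full LSB when rounding away toward the chosen infinity. A standalone sketch of the selection, with VFP_DOUBLE_LOW_BITS assumed here to be 10 guard bits and simplified mode values:

#include <cstdint>
#include <cstdio>

static const int LOW_BITS = 10; // assumed value of VFP_DOUBLE_LOW_BITS
enum RMode { ROUND_NEAREST, ROUND_PLUSINF, ROUND_MINUSINF, ROUND_TOZERO };

static uint64_t round_increment(RMode rmode, uint64_t significand, int sign) {
    uint64_t incr = 0;
    if (rmode == ROUND_NEAREST) {
        incr = 1ULL << LOW_BITS;                 // half an LSB
        if ((significand & (1ULL << (LOW_BITS + 1))) == 0)
            incr -= 1;                           // ties round to even
    } else if (rmode == ROUND_TOZERO) {
        incr = 0;                                // truncate
    } else if ((rmode == ROUND_PLUSINF) ^ (sign != 0)) {
        incr = (1ULL << (LOW_BITS + 1)) - 1;     // round away from zero
    }
    return incr;
}

int main() {
    printf("nearest, odd LSB:  0x%llx\n", (unsigned long long)
           round_increment(ROUND_NEAREST, 1ULL << (LOW_BITS + 1), 0));
    printf("nearest, even LSB: 0x%llx\n", (unsigned long long)
           round_increment(ROUND_NEAREST, 0, 0));
    printf("+inf, positive:    0x%llx\n", (unsigned long long)
           round_increment(ROUND_PLUSINF, 0, 0));
    return 0;
}
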
/*
@@ -221,43 +349,43 @@ u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd,
*/
static u32
vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
- struct vfp_double *vdm, u32 fpscr)
+ struct vfp_double *vdm, u32 fpscr)
{
- struct vfp_double *nan;
- int tn, tm = 0;
-
- tn = vfp_double_type(vdn);
-
- if (vdm)
- tm = vfp_double_type(vdm);
-
- if (fpscr & FPSCR_DEFAULT_NAN)
- /*
- * Default NaN mode - always returns a quiet NaN
- */
- nan = &vfp_double_default_qnan;
- else {
- /*
- * Contemporary mode - select the first signalling
- * NAN, or if neither are signalling, the first
- * quiet NAN.
- */
- if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN))
- nan = vdn;
- else
- nan = vdm;
- /*
- * Make the NaN quiet.
- */
- nan->significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
- }
-
- *vdd = *nan;
-
- /*
- * If one was a signalling NAN, raise invalid operation.
- */
- return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG;
+ struct vfp_double *nan;
+ int tn, tm = 0;
+
+ tn = vfp_double_type(vdn);
+
+ if (vdm)
+ tm = vfp_double_type(vdm);
+
+ if (fpscr & FPSCR_DEFAULT_NAN)
+ /*
+ * Default NaN mode - always returns a quiet NaN
+ */
+ nan = &vfp_double_default_qnan;
+ else {
+ /*
+ * Contemporary mode - select the first signalling
+ * NAN, or if neither are signalling, the first
+ * quiet NAN.
+ */
+ if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN))
+ nan = vdn;
+ else
+ nan = vdm;
+ /*
+ * Make the NaN quiet.
+ */
+ nan->significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
+ }
+
+ *vdd = *nan;
+
+ /*
+ * If one was a signalling NAN, raise invalid operation.
+ */
+ return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG;
}
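
Outside default-NaN mode, the chosen NaN is made quiet by setting the top mantissa bit. A small sketch over packed IEEE-754 double bit patterns (the packed layout here is an assumption for illustration; the diff's VFP_DOUBLE_SIGNIFICAND_QNAN operates on the unpacked significand):

#include <cstdint>
#include <cstdio>

static const uint64_t QNAN_BIT  = 1ULL << 51;        // top mantissa bit
static const uint64_t EXP_MASK  = 0x7ffULL << 52;    // all-ones exponent
static const uint64_t MANT_MASK = (1ULL << 52) - 1;

int main() {
    uint64_t snan  = EXP_MASK | 1;     // NaN with the quiet bit clear: signalling
    uint64_t quiet = snan | QNAN_BIT;  // same payload, made quiet

    printf("snan mantissa=%013llx  quiet mantissa=%013llx\n",
           (unsigned long long)(snan & MANT_MASK),
           (unsigned long long)(quiet & MANT_MASK));
    return 0;
}
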
/*
@@ -265,108 +393,108 @@ vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
*/
static u32 vfp_double_fabs(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- vfp_put_double(state, vfp_double_packed_abs(vfp_get_double(state, dm)), dd);
- return 0;
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_put_double(state, vfp_double_packed_abs(vfp_get_double(state, dm)), dd);
+ return 0;
}
static u32 vfp_double_fcpy(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- vfp_put_double(state, vfp_get_double(state, dm), dd);
- return 0;
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_put_double(state, vfp_get_double(state, dm), dd);
+ return 0;
}
static u32 vfp_double_fneg(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- vfp_put_double(state, vfp_double_packed_negate(vfp_get_double(state, dm)), dd);
- return 0;
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_put_double(state, vfp_double_packed_negate(vfp_get_double(state, dm)), dd);
+ return 0;
}
static u32 vfp_double_fsqrt(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- struct vfp_double vdm, vdd, *vdp;
- int ret, tm;
-
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- tm = vfp_double_type(&vdm);
- if (tm & (VFP_NAN|VFP_INFINITY)) {
- vdp = &vdd;
-
- if (tm & VFP_NAN)
- ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr);
- else if (vdm.sign == 0) {
- sqrt_copy:
- vdp = &vdm;
- ret = 0;
- } else {
- sqrt_invalid:
- vdp = &vfp_double_default_qnan;
- ret = FPSCR_IOC;
- }
- vfp_put_double(state, vfp_double_pack(vdp), dd);
- return ret;
- }
-
- /*
- * sqrt(+/- 0) == +/- 0
- */
- if (tm & VFP_ZERO)
- goto sqrt_copy;
-
- /*
- * Normalise a denormalised number
- */
- if (tm & VFP_DENORMAL)
- vfp_double_normalise_denormal(&vdm);
-
- /*
- * sqrt(<0) = invalid
- */
- if (vdm.sign)
- goto sqrt_invalid;
-
- vfp_double_dump("sqrt", &vdm);
-
- /*
- * Estimate the square root.
- */
- vdd.sign = 0;
- vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023;
- vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31;
-
- vfp_double_dump("sqrt estimate1", &vdd);
-
- vdm.significand >>= 1 + (vdm.exponent & 1);
- vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand);
-
- vfp_double_dump("sqrt estimate2", &vdd);
-
- /*
- * And now adjust.
- */
- if ((vdd.significand & VFP_DOUBLE_LOW_BITS_MASK) <= 5) {
- if (vdd.significand < 2) {
- vdd.significand = ~0ULL;
- } else {
- u64 termh, terml, remh, reml;
- vdm.significand <<= 2;
- mul64to128(&termh, &terml, vdd.significand, vdd.significand);
- sub128(&remh, &reml, vdm.significand, 0, termh, terml);
- while ((s64)remh < 0) {
- vdd.significand -= 1;
- shift64left(&termh, &terml, vdd.significand);
- terml |= 1;
- add128(&remh, &reml, remh, reml, termh, terml);
- }
- vdd.significand |= (remh | reml) != 0;
- }
- }
- vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
-
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double vdm, vdd, *vdp;
+ int ret, tm;
+
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ tm = vfp_double_type(&vdm);
+ if (tm & (VFP_NAN|VFP_INFINITY)) {
+ vdp = &vdd;
+
+ if (tm & VFP_NAN)
+ ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr);
+ else if (vdm.sign == 0) {
+sqrt_copy:
+ vdp = &vdm;
+ ret = 0;
+ } else {
+sqrt_invalid:
+ vdp = &vfp_double_default_qnan;
+ ret = FPSCR_IOC;
+ }
+ vfp_put_double(state, vfp_double_pack(vdp), dd);
+ return ret;
+ }
+
+ /*
+ * sqrt(+/- 0) == +/- 0
+ */
+ if (tm & VFP_ZERO)
+ goto sqrt_copy;
+
+ /*
+ * Normalise a denormalised number
+ */
+ if (tm & VFP_DENORMAL)
+ vfp_double_normalise_denormal(&vdm);
+
+ /*
+ * sqrt(<0) = invalid
+ */
+ if (vdm.sign)
+ goto sqrt_invalid;
+
+ vfp_double_dump("sqrt", &vdm);
+
+ /*
+ * Estimate the square root.
+ */
+ vdd.sign = 0;
+ vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023;
+ vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31;
+
+ vfp_double_dump("sqrt estimate1", &vdd);
+
+ vdm.significand >>= 1 + (vdm.exponent & 1);
+ vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand);
+
+ vfp_double_dump("sqrt estimate2", &vdd);
+
+ /*
+ * And now adjust.
+ */
+ if ((vdd.significand & VFP_DOUBLE_LOW_BITS_MASK) <= 5) {
+ if (vdd.significand < 2) {
+ vdd.significand = ~0ULL;
+ } else {
+ u64 termh, terml, remh, reml;
+ vdm.significand <<= 2;
+ mul64to128(&termh, &terml, vdd.significand, vdd.significand);
+ sub128(&remh, &reml, vdm.significand, 0, termh, terml);
+ while ((s64)remh < 0) {
+ vdd.significand -= 1;
+ shift64left(&termh, &terml, vdd.significand);
+ terml |= 1;
+ add128(&remh, &reml, remh, reml, termh, terml);
+ }
+ vdd.significand |= (remh | reml) != 0;
+ }
+ }
+ vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
+
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
}
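
The estimate seeds the square root by halving the unbiased exponent, since sqrt(2^e * m) = 2^(e/2) * sqrt(m). A one-line check of that arithmetic:

#include <cstdio>

int main() {
    // Exponent seeding from vfp_double_fsqrt: unbias, halve, rebias.
    int exponent = 1023 + 4;                        // 16.0
    int estimate = ((exponent - 1023) >> 1) + 1023; // 1023 + 2, i.e. 4.0
    printf("%d\n", estimate);                       // prints 1025
    return 0;
}
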
/*
@@ -377,319 +505,362 @@ static u32 vfp_double_fsqrt(ARMul_State* state, int dd, int unused, int dm, u32
*/
static u32 vfp_compare(ARMul_State* state, int dd, int signal_on_qnan, int dm, u32 fpscr)
{
- s64 d, m;
- u32 ret = 0;
-
- pr_debug("In %s, state=0x%x, fpscr=0x%x\n", __FUNCTION__, state, fpscr);
- m = vfp_get_double(state, dm);
- if (vfp_double_packed_exponent(m) == 2047 && vfp_double_packed_mantissa(m)) {
- ret |= FPSCR_C | FPSCR_V;
- if (signal_on_qnan || !(vfp_double_packed_mantissa(m) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1))))
- /*
- * Signalling NaN, or signalling on quiet NaN
- */
- ret |= FPSCR_IOC;
- }
-
- d = vfp_get_double(state, dd);
- if (vfp_double_packed_exponent(d) == 2047 && vfp_double_packed_mantissa(d)) {
- ret |= FPSCR_C | FPSCR_V;
- if (signal_on_qnan || !(vfp_double_packed_mantissa(d) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1))))
- /*
- * Signalling NaN, or signalling on quiet NaN
- */
- ret |= FPSCR_IOC;
- }
-
- if (ret == 0) {
- //printf("In %s, d=%lld, m =%lld\n ", __FUNCTION__, d, m);
- if (d == m || vfp_double_packed_abs(d | m) == 0) {
- /*
- * equal
- */
- ret |= FPSCR_Z | FPSCR_C;
- //printf("In %s,1 ret=0x%x\n", __FUNCTION__, ret);
- } else if (vfp_double_packed_sign(d ^ m)) {
- /*
- * different signs
- */
- if (vfp_double_packed_sign(d))
- /*
- * d is negative, so d < m
- */
- ret |= FPSCR_N;
- else
- /*
- * d is positive, so d > m
- */
- ret |= FPSCR_C;
- } else if ((vfp_double_packed_sign(d) != 0) ^ (d < m)) {
- /*
- * d < m
- */
- ret |= FPSCR_N;
- } else if ((vfp_double_packed_sign(d) != 0) ^ (d > m)) {
- /*
- * d > m
- */
- ret |= FPSCR_C;
- }
- }
- pr_debug("In %s, state=0x%x, ret=0x%x\n", __FUNCTION__, state, ret);
-
- return ret;
+ s64 d, m;
+ u32 ret = 0;
+
+ pr_debug("In %s, state=0x%x, fpscr=0x%x\n", __FUNCTION__, state, fpscr);
+ m = vfp_get_double(state, dm);
+ if (vfp_double_packed_exponent(m) == 2047 && vfp_double_packed_mantissa(m)) {
+ ret |= FPSCR_C | FPSCR_V;
+ if (signal_on_qnan || !(vfp_double_packed_mantissa(m) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1))))
+ /*
+ * Signalling NaN, or signalling on quiet NaN
+ */
+ ret |= FPSCR_IOC;
+ }
+
+ d = vfp_get_double(state, dd);
+ if (vfp_double_packed_exponent(d) == 2047 && vfp_double_packed_mantissa(d)) {
+ ret |= FPSCR_C | FPSCR_V;
+ if (signal_on_qnan || !(vfp_double_packed_mantissa(d) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1))))
+ /*
+ * Signalling NaN, or signalling on quiet NaN
+ */
+ ret |= FPSCR_IOC;
+ }
+
+ if (ret == 0) {
+ //printf("In %s, d=%lld, m =%lld\n ", __FUNCTION__, d, m);
+ if (d == m || vfp_double_packed_abs(d | m) == 0) {
+ /*
+ * equal
+ */
+ ret |= FPSCR_Z | FPSCR_C;
+ //printf("In %s,1 ret=0x%x\n", __FUNCTION__, ret);
+ } else if (vfp_double_packed_sign(d ^ m)) {
+ /*
+ * different signs
+ */
+ if (vfp_double_packed_sign(d))
+ /*
+ * d is negative, so d < m
+ */
+ ret |= FPSCR_N;
+ else
+ /*
+ * d is positive, so d > m
+ */
+ ret |= FPSCR_C;
+ } else if ((vfp_double_packed_sign(d) != 0) ^ (d < m)) {
+ /*
+ * d < m
+ */
+ ret |= FPSCR_N;
+ } else if ((vfp_double_packed_sign(d) != 0) ^ (d > m)) {
+ /*
+ * d > m
+ */
+ ret |= FPSCR_C;
+ }
+ }
+ pr_debug("In %s, state=0x%x, ret=0x%x\n", __FUNCTION__, state, ret);
+
+ return ret;
}
static u32 vfp_double_fcmp(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_compare(state, dd, 0, dm, fpscr);
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_compare(state, dd, 0, dm, fpscr);
}
static u32 vfp_double_fcmpe(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_compare(state, dd, 1, dm, fpscr);
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_compare(state, dd, 1, dm, fpscr);
}
static u32 vfp_double_fcmpz(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_compare(state, dd, 0, VFP_REG_ZERO, fpscr);
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_compare(state, dd, 0, VFP_REG_ZERO, fpscr);
}
static u32 vfp_double_fcmpez(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_compare(state, dd, 1, VFP_REG_ZERO, fpscr);
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_compare(state, dd, 1, VFP_REG_ZERO, fpscr);
+}
+
+u32 vfp_double_fcvtsinterncutting(ARMul_State* state, int sd, struct vfp_double* dm, u32 fpscr) //ichfly for internal use only
+{
+ struct vfp_single vsd;
+ int tm;
+ u32 exceptions = 0;
+
+ pr_debug("In %s\n", __FUNCTION__);
+
+ tm = vfp_double_type(dm);
+
+ /*
+ * If we have a signalling NaN, signal invalid operation.
+ */
+ if (tm == VFP_SNAN)
+ exceptions = FPSCR_IOC;
+
+ if (tm & VFP_DENORMAL)
+ vfp_double_normalise_denormal(dm);
+
+ vsd.sign = dm->sign;
+ vsd.significand = vfp_hi64to32jamming(dm->significand);
+
+ /*
+ * If we have an infinity or a NaN, the exponent must be 255
+ */
+ if (tm & (VFP_INFINITY | VFP_NAN)) {
+ vsd.exponent = 255;
+ if (tm == VFP_QNAN)
+ vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
+ goto pack_nan;
+ }
+ else if (tm & VFP_ZERO)
+ vsd.exponent = 0;
+ else
+ vsd.exponent = dm->exponent - (1023 - 127);
+
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fcvts");
+
+pack_nan:
+ vfp_put_float(state, vfp_single_pack(&vsd), sd);
+ return exceptions;
}
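
Converting a finite double to single precision rebiases the exponent from 1023 to 127, so the unpacked exponent drops by 896; infinities and NaNs instead take the single format's all-ones exponent of 255. A quick check of the rebias used by both conversion paths:

#include <cstdio>

int main() {
    // Same rebias as vfp_double_fcvts/vfp_double_fcvtsinterncutting: the
    // unpacked exponent carries its format bias, so double (bias 1023)
    // to single (bias 127) subtracts the difference.
    int double_exponent = 1023;                          // 1.0 in double
    int single_exponent = double_exponent - (1023 - 127);
    printf("%d\n", single_exponent);                     // 127: 1.0 in single
    return 0;
}
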
static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
{
- struct vfp_double vdm;
- struct vfp_single vsd;
- int tm;
- u32 exceptions = 0;
-
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
-
- tm = vfp_double_type(&vdm);
-
- /*
- * If we have a signalling NaN, signal invalid operation.
- */
- if (tm == VFP_SNAN)
- exceptions = FPSCR_IOC;
-
- if (tm & VFP_DENORMAL)
- vfp_double_normalise_denormal(&vdm);
-
- vsd.sign = vdm.sign;
- vsd.significand = vfp_hi64to32jamming(vdm.significand);
-
- /*
- * If we have an infinity or a NaN, the exponent must be 255
- */
- if (tm & (VFP_INFINITY|VFP_NAN)) {
- vsd.exponent = 255;
- if (tm == VFP_QNAN)
- vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
- goto pack_nan;
- } else if (tm & VFP_ZERO)
- vsd.exponent = 0;
- else
- vsd.exponent = vdm.exponent - (1023 - 127);
-
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fcvts");
-
- pack_nan:
- vfp_put_float(state, vfp_single_pack(&vsd), sd);
- return exceptions;
+ struct vfp_double vdm;
+ struct vfp_single vsd;
+ int tm;
+ u32 exceptions = 0;
+
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+
+ tm = vfp_double_type(&vdm);
+
+ /*
+ * If we have a signalling NaN, signal invalid operation.
+ */
+ if (tm == VFP_SNAN)
+ exceptions = FPSCR_IOC;
+
+ if (tm & VFP_DENORMAL)
+ vfp_double_normalise_denormal(&vdm);
+
+ vsd.sign = vdm.sign;
+ vsd.significand = vfp_hi64to32jamming(vdm.significand);
+
+ /*
+ * If we have an infinity or a NaN, the exponent must be 255
+ */
+ if (tm & (VFP_INFINITY|VFP_NAN)) {
+ vsd.exponent = 255;
+ if (tm == VFP_QNAN)
+ vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
+ goto pack_nan;
+ } else if (tm & VFP_ZERO)
+ vsd.exponent = 0;
+ else
+ vsd.exponent = vdm.exponent - (1023 - 127);
+
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fcvts");
+
+pack_nan:
+ vfp_put_float(state, vfp_single_pack(&vsd), sd);
+ return exceptions;
}
static u32 vfp_double_fuito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- struct vfp_double vdm;
- u32 m = vfp_get_float(state, dm);
+ struct vfp_double vdm;
+ u32 m = vfp_get_float(state, dm);
- pr_debug("In %s\n", __FUNCTION__);
- vdm.sign = 0;
- vdm.exponent = 1023 + 63 - 1;
- vdm.significand = (u64)m;
+ pr_debug("In %s\n", __FUNCTION__);
+ vdm.sign = 0;
+ vdm.exponent = 1023 + 63 - 1;
+ vdm.significand = (u64)m;
- return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fuito");
+ return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fuito");
}
static u32 vfp_double_fsito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- struct vfp_double vdm;
- u32 m = vfp_get_float(state, dm);
+ struct vfp_double vdm;
+ u32 m = vfp_get_float(state, dm);
- pr_debug("In %s\n", __FUNCTION__);
- vdm.sign = (m & 0x80000000) >> 16;
- vdm.exponent = 1023 + 63 - 1;
- vdm.significand = vdm.sign ? -m : m;
+ pr_debug("In %s\n", __FUNCTION__);
+ vdm.sign = (m & 0x80000000) >> 16;
+ vdm.exponent = 1023 + 63 - 1;
+ vdm.significand = vdm.sign ? -m : m;
- return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fsito");
+ return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fsito");
}
static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
{
- struct vfp_double vdm;
- u32 d, exceptions = 0;
- int rmode = fpscr & FPSCR_RMODE_MASK;
- int tm;
-
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
-
- /*
- * Do we have a denormalised number?
- */
- tm = vfp_double_type(&vdm);
- if (tm & VFP_DENORMAL)
- exceptions |= FPSCR_IDC;
-
- if (tm & VFP_NAN)
- vdm.sign = 0;
-
- if (vdm.exponent >= 1023 + 32) {
- d = vdm.sign ? 0 : 0xffffffff;
- exceptions = FPSCR_IOC;
- } else if (vdm.exponent >= 1023 - 1) {
- int shift = 1023 + 63 - vdm.exponent;
- u64 rem, incr = 0;
-
- /*
- * 2^0 <= m < 2^32-2^8
- */
- d = (vdm.significand << 1) >> shift;
- rem = vdm.significand << (65 - shift);
-
- if (rmode == FPSCR_ROUND_NEAREST) {
- incr = 0x8000000000000000ULL;
- if ((d & 1) == 0)
- incr -= 1;
- } else if (rmode == FPSCR_ROUND_TOZERO) {
- incr = 0;
- } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) {
- incr = ~0ULL;
- }
-
- if ((rem + incr) < rem) {
- if (d < 0xffffffff)
- d += 1;
- else
- exceptions |= FPSCR_IOC;
- }
-
- if (d && vdm.sign) {
- d = 0;
- exceptions |= FPSCR_IOC;
- } else if (rem)
- exceptions |= FPSCR_IXC;
- } else {
- d = 0;
- if (vdm.exponent | vdm.significand) {
- exceptions |= FPSCR_IXC;
- if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
- d = 1;
- else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) {
- d = 0;
- exceptions |= FPSCR_IOC;
- }
- }
- }
-
- pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
-
- vfp_put_float(state, d, sd);
-
- return exceptions;
+ struct vfp_double vdm;
+ u32 d, exceptions = 0;
+ int rmode = fpscr & FPSCR_RMODE_MASK;
+ int tm;
+
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+
+ /*
+ * Do we have a denormalised number?
+ */
+ tm = vfp_double_type(&vdm);
+ if (tm & VFP_DENORMAL)
+ exceptions |= FPSCR_IDC;
+
+ if (tm & VFP_NAN)
+ vdm.sign = 0;
+
+ if (vdm.exponent >= 1023 + 32) {
+ d = vdm.sign ? 0 : 0xffffffff;
+ exceptions = FPSCR_IOC;
+ } else if (vdm.exponent >= 1023 - 1) {
+ int shift = 1023 + 63 - vdm.exponent;
+ u64 rem, incr = 0;
+
+ /*
+ * 2^0 <= m < 2^32-2^8
+ */
+ d = (ARMword)((vdm.significand << 1) >> shift);
+ rem = vdm.significand << (65 - shift);
+
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 0x8000000000000000ULL;
+ if ((d & 1) == 0)
+ incr -= 1;
+ } else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) {
+ incr = ~0ULL;
+ }
+
+ if ((rem + incr) < rem) {
+ if (d < 0xffffffff)
+ d += 1;
+ else
+ exceptions |= FPSCR_IOC;
+ }
+
+ if (d && vdm.sign) {
+ d = 0;
+ exceptions |= FPSCR_IOC;
+ } else if (rem)
+ exceptions |= FPSCR_IXC;
+ } else {
+ d = 0;
+ if (vdm.exponent | vdm.significand) {
+ exceptions |= FPSCR_IXC;
+ if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
+ d = 1;
+ else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) {
+ d = 0;
+ exceptions |= FPSCR_IOC;
+ }
+ }
+ }
+
+ pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
+
+ vfp_put_float(state, d, sd);
+
+ return exceptions;
}
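
ftoui saturates out-of-range inputs: magnitudes of 2^32 or more clamp to the end of the unsigned range, and nonzero negative results clamp to zero, both raising the invalid-operation flag. A sketch of just those saturation cases (the in-range rounding path is elided):

#include <cstdint>
#include <cstdio>

int main() {
    struct Case { int sign; int exponent; };
    Case cases[] = {
        {0, 1023 + 32}, // >= 2^32 and positive: clamps to 0xffffffff
        {1, 1023 + 32}, // >= 2^32 in magnitude and negative: clamps to 0
    };
    for (const Case& c : cases) {
        uint32_t d = 0;
        bool ioc = false;
        if (c.exponent >= 1023 + 32) { // same bound as vfp_double_ftoui
            d = c.sign ? 0 : 0xffffffff;
            ioc = true;                // FPSCR_IOC
        }
        printf("d=%08x IOC=%d\n", d, (int)ioc);
    }
    return 0;
}
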
static u32 vfp_double_ftouiz(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_double_ftoui(state, sd, unused, dm, FPSCR_ROUND_TOZERO);
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_double_ftoui(state, sd, unused, dm, FPSCR_ROUND_TOZERO);
}
static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
{
- struct vfp_double vdm;
- u32 d, exceptions = 0;
- int rmode = fpscr & FPSCR_RMODE_MASK;
- int tm;
-
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- vfp_double_dump("VDM", &vdm);
-
- /*
- * Do we have denormalised number?
- */
- tm = vfp_double_type(&vdm);
- if (tm & VFP_DENORMAL)
- exceptions |= FPSCR_IDC;
-
- if (tm & VFP_NAN) {
- d = 0;
- exceptions |= FPSCR_IOC;
- } else if (vdm.exponent >= 1023 + 32) {
- d = 0x7fffffff;
- if (vdm.sign)
- d = ~d;
- exceptions |= FPSCR_IOC;
- } else if (vdm.exponent >= 1023 - 1) {
- int shift = 1023 + 63 - vdm.exponent; /* 58 */
- u64 rem, incr = 0;
-
- d = (vdm.significand << 1) >> shift;
- rem = vdm.significand << (65 - shift);
-
- if (rmode == FPSCR_ROUND_NEAREST) {
- incr = 0x8000000000000000ULL;
- if ((d & 1) == 0)
- incr -= 1;
- } else if (rmode == FPSCR_ROUND_TOZERO) {
- incr = 0;
- } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) {
- incr = ~0ULL;
- }
-
- if ((rem + incr) < rem && d < 0xffffffff)
- d += 1;
- if (d > 0x7fffffff + (vdm.sign != 0)) {
- d = 0x7fffffff + (vdm.sign != 0);
- exceptions |= FPSCR_IOC;
- } else if (rem)
- exceptions |= FPSCR_IXC;
-
- if (vdm.sign)
- d = -d;
- } else {
- d = 0;
- if (vdm.exponent | vdm.significand) {
- exceptions |= FPSCR_IXC;
- if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
- d = 1;
- else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign)
- d = -1;
- }
- }
-
- pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
-
- vfp_put_float(state, (s32)d, sd);
-
- return exceptions;
+ struct vfp_double vdm;
+ u32 d, exceptions = 0;
+ int rmode = fpscr & FPSCR_RMODE_MASK;
+ int tm;
+
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ vfp_double_dump("VDM", &vdm);
+
+ /*
+ * Do we have denormalised number?
+ */
+ tm = vfp_double_type(&vdm);
+ if (tm & VFP_DENORMAL)
+ exceptions |= FPSCR_IDC;
+
+ if (tm & VFP_NAN) {
+ d = 0;
+ exceptions |= FPSCR_IOC;
+ } else if (vdm.exponent >= 1023 + 32) {
+ d = 0x7fffffff;
+ if (vdm.sign)
+ d = ~d;
+ exceptions |= FPSCR_IOC;
+ } else if (vdm.exponent >= 1023 - 1) {
+ int shift = 1023 + 63 - vdm.exponent; /* 58 */
+ u64 rem, incr = 0;
+
+ d = (ARMword)((vdm.significand << 1) >> shift);
+ rem = vdm.significand << (65 - shift);
+
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 0x8000000000000000ULL;
+ if ((d & 1) == 0)
+ incr -= 1;
+ } else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) {
+ incr = ~0ULL;
+ }
+
+ if ((rem + incr) < rem && d < 0xffffffff)
+ d += 1;
+ if (d > (0x7fffffff + (vdm.sign != 0))) {
+ d = (0x7fffffff + (vdm.sign != 0));
+ exceptions |= FPSCR_IOC;
+ } else if (rem)
+ exceptions |= FPSCR_IXC;
+
+ if (vdm.sign)
+ d = -d;
+ } else {
+ d = 0;
+ if (vdm.exponent | vdm.significand) {
+ exceptions |= FPSCR_IXC;
+ if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0)
+ d = 1;
+ else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign)
+ d = -1;
+ }
+ }
+
+ pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
+
+ vfp_put_float(state, (s32)d, sd);
+
+ return exceptions;
}
static u32 vfp_double_ftosiz(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_double_ftosi(state, dd, unused, dm, FPSCR_ROUND_TOZERO);
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_double_ftosi(state, dd, unused, dm, FPSCR_ROUND_TOZERO);
}
static struct op fops_ext[] = {
@@ -728,197 +899,195 @@ static struct op fops_ext[] = {
static u32
vfp_double_fadd_nonnumber(struct vfp_double *vdd, struct vfp_double *vdn,
- struct vfp_double *vdm, u32 fpscr)
+ struct vfp_double *vdm, u32 fpscr)
{
- struct vfp_double *vdp;
- u32 exceptions = 0;
- int tn, tm;
-
- tn = vfp_double_type(vdn);
- tm = vfp_double_type(vdm);
-
- if (tn & tm & VFP_INFINITY) {
- /*
- * Two infinities. Are they different signs?
- */
- if (vdn->sign ^ vdm->sign) {
- /*
- * different signs -> invalid
- */
- exceptions = FPSCR_IOC;
- vdp = &vfp_double_default_qnan;
- } else {
- /*
- * same signs -> valid
- */
- vdp = vdn;
- }
- } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) {
- /*
- * One infinity and one number -> infinity
- */
- vdp = vdn;
- } else {
- /*
- * 'n' is a NaN of some type
- */
- return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
- }
- *vdd = *vdp;
- return exceptions;
+ struct vfp_double *vdp;
+ u32 exceptions = 0;
+ int tn, tm;
+
+ tn = vfp_double_type(vdn);
+ tm = vfp_double_type(vdm);
+
+ if (tn & tm & VFP_INFINITY) {
+ /*
+ * Two infinities. Are they different signs?
+ */
+ if (vdn->sign ^ vdm->sign) {
+ /*
+ * different signs -> invalid
+ */
+ exceptions = FPSCR_IOC;
+ vdp = &vfp_double_default_qnan;
+ } else {
+ /*
+ * same signs -> valid
+ */
+ vdp = vdn;
+ }
+ } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) {
+ /*
+ * One infinity and one number -> infinity
+ */
+ vdp = vdn;
+ } else {
+ /*
+ * 'n' is a NaN of some type
+ */
+ return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
+ }
+ *vdd = *vdp;
+ return exceptions;
}
-static u32
-vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn,
- struct vfp_double *vdm, u32 fpscr)
+u32 vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn,struct vfp_double *vdm, u32 fpscr)
{
- u32 exp_diff;
- u64 m_sig;
-
- if (vdn->significand & (1ULL << 63) ||
- vdm->significand & (1ULL << 63)) {
- pr_info("VFP: bad FP values\n");
- vfp_double_dump("VDN", vdn);
- vfp_double_dump("VDM", vdm);
- }
-
- /*
- * Ensure that 'n' is the largest magnitude number. Note that
- * if 'n' and 'm' have equal exponents, we do not swap them.
- * This ensures that NaN propagation works correctly.
- */
- if (vdn->exponent < vdm->exponent) {
- struct vfp_double *t = vdn;
- vdn = vdm;
- vdm = t;
- }
-
- /*
- * Is 'n' an infinity or a NaN? Note that 'm' may be a number,
- * infinity or a NaN here.
- */
- if (vdn->exponent == 2047)
- return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr);
-
- /*
- * We have two proper numbers, where 'vdn' is the larger magnitude.
- *
- * Copy 'n' to 'd' before doing the arithmetic.
- */
- *vdd = *vdn;
-
- /*
- * Align 'm' with the result.
- */
- exp_diff = vdn->exponent - vdm->exponent;
- m_sig = vfp_shiftright64jamming(vdm->significand, exp_diff);
-
- /*
- * If the signs are different, we are really subtracting.
- */
- if (vdn->sign ^ vdm->sign) {
- m_sig = vdn->significand - m_sig;
- if ((s64)m_sig < 0) {
- vdd->sign = vfp_sign_negate(vdd->sign);
- m_sig = -m_sig;
- } else if (m_sig == 0) {
- vdd->sign = (fpscr & FPSCR_RMODE_MASK) ==
- FPSCR_ROUND_MINUSINF ? 0x8000 : 0;
- }
- } else {
- m_sig += vdn->significand;
- }
- vdd->significand = m_sig;
-
- return 0;
+ u32 exp_diff;
+ u64 m_sig;
+
+ if (vdn->significand & (1ULL << 63) ||
+ vdm->significand & (1ULL << 63)) {
+ pr_info("VFP: bad FP values in %s\n", __func__);
+ vfp_double_dump("VDN", vdn);
+ vfp_double_dump("VDM", vdm);
+ }
+
+ /*
+ * Ensure that 'n' is the largest magnitude number. Note that
+ * if 'n' and 'm' have equal exponents, we do not swap them.
+ * This ensures that NaN propagation works correctly.
+ */
+ if (vdn->exponent < vdm->exponent) {
+ struct vfp_double *t = vdn;
+ vdn = vdm;
+ vdm = t;
+ }
+
+ /*
+ * Is 'n' an infinity or a NaN? Note that 'm' may be a number,
+ * infinity or a NaN here.
+ */
+ if (vdn->exponent == 2047)
+ return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr);
+
+ /*
+ * We have two proper numbers, where 'vdn' is the larger magnitude.
+ *
+ * Copy 'n' to 'd' before doing the arithmetic.
+ */
+ *vdd = *vdn;
+
+ /*
+ * Align 'm' with the result.
+ */
+ exp_diff = vdn->exponent - vdm->exponent;
+ m_sig = vfp_shiftright64jamming(vdm->significand, exp_diff);
+
+ /*
+ * If the signs are different, we are really subtracting.
+ */
+ if (vdn->sign ^ vdm->sign) {
+ m_sig = vdn->significand - m_sig;
+ if ((s64)m_sig < 0) {
+ vdd->sign = vfp_sign_negate(vdd->sign);
+ m_sig = -m_sig;
+ } else if (m_sig == 0) {
+ vdd->sign = (fpscr & FPSCR_RMODE_MASK) ==
+ FPSCR_ROUND_MINUSINF ? 0x8000 : 0;
+ }
+ } else {
+ m_sig += vdn->significand;
+ }
+ vdd->significand = m_sig;
+
+ return 0;
}
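
Aligning the smaller addend uses a "jamming" right shift: any bits shifted out are OR-ed into the result's least significant bit as a sticky bit, so rounding still sees that precision was lost. A standalone sketch of the behaviour the add path relies on (the real helper is vfp_shiftright64jamming in vfp_helper.h):

#include <cstdint>
#include <cstdio>

static uint64_t shiftright64jamming(uint64_t val, int shift) {
    if (shift <= 0)
        return val;
    if (shift >= 64)
        return val != 0; // everything shifted out; only stickiness remains
    uint64_t sticky = (val & ((1ULL << shift) - 1)) != 0 ? 1 : 0;
    return (val >> shift) | sticky;
}

int main() {
    // The low set bit would be lost by a plain shift; jamming keeps it.
    printf("%016llx\n",
           (unsigned long long)shiftright64jamming(0x8000000000000001ULL, 4));
    // prints 0800000000000001
    return 0;
}
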
-static u32
+u32
vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn,
- struct vfp_double *vdm, u32 fpscr)
+ struct vfp_double *vdm, u32 fpscr)
{
- vfp_double_dump("VDN", vdn);
- vfp_double_dump("VDM", vdm);
-
- /*
- * Ensure that 'n' is the largest magnitude number. Note that
- * if 'n' and 'm' have equal exponents, we do not swap them.
- * This ensures that NaN propagation works correctly.
- */
- if (vdn->exponent < vdm->exponent) {
- struct vfp_double *t = vdn;
- vdn = vdm;
- vdm = t;
- pr_debug("VFP: swapping M <-> N\n");
- }
-
- vdd->sign = vdn->sign ^ vdm->sign;
-
- /*
- * If 'n' is an infinity or NaN, handle it. 'm' may be anything.
- */
- if (vdn->exponent == 2047) {
- if (vdn->significand || (vdm->exponent == 2047 && vdm->significand))
- return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
- if ((vdm->exponent | vdm->significand) == 0) {
- *vdd = vfp_double_default_qnan;
- return FPSCR_IOC;
- }
- vdd->exponent = vdn->exponent;
- vdd->significand = 0;
- return 0;
- }
-
- /*
- * If 'm' is zero, the result is always zero. In this case,
- * 'n' may be zero or a number, but it doesn't matter which.
- */
- if ((vdm->exponent | vdm->significand) == 0) {
- vdd->exponent = 0;
- vdd->significand = 0;
- return 0;
- }
-
- /*
- * We add 2 to the destination exponent for the same reason
- * as the addition case - though this time we have +1 from
- * each input operand.
- */
- vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2;
- vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand);
-
- vfp_double_dump("VDD", vdd);
- return 0;
+ vfp_double_dump("VDN", vdn);
+ vfp_double_dump("VDM", vdm);
+
+ /*
+ * Ensure that 'n' is the largest magnitude number. Note that
+ * if 'n' and 'm' have equal exponents, we do not swap them.
+ * This ensures that NaN propagation works correctly.
+ */
+ if (vdn->exponent < vdm->exponent) {
+ struct vfp_double *t = vdn;
+ vdn = vdm;
+ vdm = t;
+ pr_debug("VFP: swapping M <-> N\n");
+ }
+
+ vdd->sign = vdn->sign ^ vdm->sign;
+
+ /*
+ * If 'n' is an infinity or NaN, handle it. 'm' may be anything.
+ */
+ if (vdn->exponent == 2047) {
+ if (vdn->significand || (vdm->exponent == 2047 && vdm->significand))
+ return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
+ if ((vdm->exponent | vdm->significand) == 0) {
+ *vdd = vfp_double_default_qnan;
+ return FPSCR_IOC;
+ }
+ vdd->exponent = vdn->exponent;
+ vdd->significand = 0;
+ return 0;
+ }
+
+ /*
+ * If 'm' is zero, the result is always zero. In this case,
+ * 'n' may be zero or a number, but it doesn't matter which.
+ */
+ if ((vdm->exponent | vdm->significand) == 0) {
+ vdd->exponent = 0;
+ vdd->significand = 0;
+ return 0;
+ }
+
+ /*
+ * We add 2 to the destination exponent for the same reason
+ * as the addition case - though this time we have +1 from
+ * each input operand.
+ */
+ vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2;
+ vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand);
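+ /*
+ * vfp_hi64multiply64 returns the high 64 bits of the 128-bit
+ * product, jamming any non-zero low bits into the LSB as a
+ * sticky bit for the later rounding.
+ */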
+
+ vfp_double_dump("VDD", vdd);
+ return 0;
}
#define NEG_MULTIPLY (1 << 0)
#define NEG_SUBTRACT (1 << 1)
static u32
-vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 fpscr, u32 negate, const char *func)
+vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 fpscr, u32 negate, const char *func)
{
- struct vfp_double vdd, vdp, vdn, vdm;
- u32 exceptions;
+ struct vfp_double vdd, vdp, vdn, vdm;
+ u32 exceptions;
- vfp_double_unpack(&vdn, vfp_get_double(state, dn));
- if (vdn.exponent == 0 && vdn.significand)
- vfp_double_normalise_denormal(&vdn);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dn));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- if (vdm.exponent == 0 && vdm.significand)
- vfp_double_normalise_denormal(&vdm);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ if (vdm.exponent == 0 && vdm.significand)
+ vfp_double_normalise_denormal(&vdm);
- exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
- if (negate & NEG_MULTIPLY)
- vdp.sign = vfp_sign_negate(vdp.sign);
+ exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
+ if (negate & NEG_MULTIPLY)
+ vdp.sign = vfp_sign_negate(vdp.sign);
- vfp_double_unpack(&vdn, vfp_get_double(state, dd));
- if (negate & NEG_SUBTRACT)
- vdn.sign = vfp_sign_negate(vdn.sign);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dd));
+ if (negate & NEG_SUBTRACT)
+ vdn.sign = vfp_sign_negate(vdn.sign);
- exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
+ exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, func);
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, func);
}
/*
@@ -930,8 +1099,8 @@ vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 f
*/
static u32 vfp_double_fmac(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, 0, "fmac");
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, 0, "fmac");
}
/*
@@ -939,8 +1108,8 @@ static u32 vfp_double_fmac(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fnmac(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac");
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac");
}
/*
@@ -948,8 +1117,8 @@ static u32 vfp_double_fnmac(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
*/
static u32 vfp_double_fmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc");
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc");
}
/*
@@ -957,8 +1126,8 @@ static u32 vfp_double_fmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- pr_debug("In %s\n", __FUNCTION__);
- return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
+ pr_debug("In %s\n", __FUNCTION__);
+ return vfp_double_multiply_accumulate(state, dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
}
/*
@@ -966,20 +1135,20 @@ static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
*/
static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- struct vfp_double vdd, vdn, vdm;
- u32 exceptions;
+ struct vfp_double vdd, vdn, vdm;
+ u32 exceptions;
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdn, vfp_get_double(state, dn));
- if (vdn.exponent == 0 && vdn.significand)
- vfp_double_normalise_denormal(&vdn);
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dn));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- if (vdm.exponent == 0 && vdm.significand)
- vfp_double_normalise_denormal(&vdm);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ if (vdm.exponent == 0 && vdm.significand)
+ vfp_double_normalise_denormal(&vdm);
- exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fmul");
+ exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fmul");
}
/*
@@ -987,22 +1156,22 @@ static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- struct vfp_double vdd, vdn, vdm;
- u32 exceptions;
+ struct vfp_double vdd, vdn, vdm;
+ u32 exceptions;
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdn, vfp_get_double(state, dn));
- if (vdn.exponent == 0 && vdn.significand)
- vfp_double_normalise_denormal(&vdn);
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dn));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- if (vdm.exponent == 0 && vdm.significand)
- vfp_double_normalise_denormal(&vdm);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ if (vdm.exponent == 0 && vdm.significand)
+ vfp_double_normalise_denormal(&vdm);
- exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
- vdd.sign = vfp_sign_negate(vdd.sign);
+ exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+ vdd.sign = vfp_sign_negate(vdd.sign);
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fnmul");
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fnmul");
}
/*
@@ -1010,21 +1179,21 @@ static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
*/
static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- struct vfp_double vdd, vdn, vdm;
- u32 exceptions;
+ struct vfp_double vdd, vdn, vdm;
+ u32 exceptions;
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdn, vfp_get_double(state, dn));
- if (vdn.exponent == 0 && vdn.significand)
- vfp_double_normalise_denormal(&vdn);
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dn));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- if (vdm.exponent == 0 && vdm.significand)
- vfp_double_normalise_denormal(&vdm);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ if (vdm.exponent == 0 && vdm.significand)
+ vfp_double_normalise_denormal(&vdm);
- exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+ exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fadd");
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fadd");
}
/*
@@ -1032,26 +1201,26 @@ static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- struct vfp_double vdd, vdn, vdm;
- u32 exceptions;
+ struct vfp_double vdd, vdn, vdm;
+ u32 exceptions;
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdn, vfp_get_double(state, dn));
- if (vdn.exponent == 0 && vdn.significand)
- vfp_double_normalise_denormal(&vdn);
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dn));
+ if (vdn.exponent == 0 && vdn.significand)
+ vfp_double_normalise_denormal(&vdn);
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
- if (vdm.exponent == 0 && vdm.significand)
- vfp_double_normalise_denormal(&vdm);
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+ if (vdm.exponent == 0 && vdm.significand)
+ vfp_double_normalise_denormal(&vdm);
- /*
- * Subtraction is like addition, but with a negated operand.
- */
- vdm.sign = vfp_sign_negate(vdm.sign);
+ /*
+ * Subtraction is like addition, but with a negated operand.
+ */
+ vdm.sign = vfp_sign_negate(vdm.sign);
- exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+ exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fsub");
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fsub");
}
/*
@@ -1059,120 +1228,120 @@ static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
*/
static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
{
- struct vfp_double vdd, vdn, vdm;
- u32 exceptions = 0;
- int tm, tn;
-
- pr_debug("In %s\n", __FUNCTION__);
- vfp_double_unpack(&vdn, vfp_get_double(state, dn));
- vfp_double_unpack(&vdm, vfp_get_double(state, dm));
-
- vdd.sign = vdn.sign ^ vdm.sign;
-
- tn = vfp_double_type(&vdn);
- tm = vfp_double_type(&vdm);
-
- /*
- * Is n a NAN?
- */
- if (tn & VFP_NAN)
- goto vdn_nan;
-
- /*
- * Is m a NAN?
- */
- if (tm & VFP_NAN)
- goto vdm_nan;
-
- /*
- * If n and m are infinity, the result is invalid
- * If n and m are zero, the result is invalid
- */
- if (tm & tn & (VFP_INFINITY|VFP_ZERO))
- goto invalid;
-
- /*
- * If n is infinity, the result is infinity
- */
- if (tn & VFP_INFINITY)
- goto infinity;
-
- /*
- * If m is zero, raise div0 exceptions
- */
- if (tm & VFP_ZERO)
- goto divzero;
-
- /*
- * If m is infinity, or n is zero, the result is zero
- */
- if (tm & VFP_INFINITY || tn & VFP_ZERO)
- goto zero;
-
- if (tn & VFP_DENORMAL)
- vfp_double_normalise_denormal(&vdn);
- if (tm & VFP_DENORMAL)
- vfp_double_normalise_denormal(&vdm);
-
- /*
- * Ok, we have two numbers, we can perform division.
- */
- vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1;
- vdm.significand <<= 1;
- if (vdm.significand <= (2 * vdn.significand)) {
- vdn.significand >>= 1;
- vdd.exponent++;
- }
- vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand);
- if ((vdd.significand & 0x1ff) <= 2) {
- u64 termh, terml, remh, reml;
- mul64to128(&termh, &terml, vdm.significand, vdd.significand);
- sub128(&remh, &reml, vdn.significand, 0, termh, terml);
- while ((s64)remh < 0) {
- vdd.significand -= 1;
- add128(&remh, &reml, remh, reml, 0, vdm.significand);
- }
- vdd.significand |= (reml != 0);
- }
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fdiv");
-
- vdn_nan:
- exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
- pack:
- vfp_put_double(state, vfp_double_pack(&vdd), dd);
- return exceptions;
-
- vdm_nan:
- exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
- goto pack;
-
- zero:
- vdd.exponent = 0;
- vdd.significand = 0;
- goto pack;
-
- divzero:
- exceptions = FPSCR_DZC;
- infinity:
- vdd.exponent = 2047;
- vdd.significand = 0;
- goto pack;
-
- invalid:
- vfp_put_double(state, vfp_double_pack(&vfp_double_default_qnan), dd);
- return FPSCR_IOC;
+ struct vfp_double vdd, vdn, vdm;
+ u32 exceptions = 0;
+ int tm, tn;
+
+ pr_debug("In %s\n", __FUNCTION__);
+ vfp_double_unpack(&vdn, vfp_get_double(state, dn));
+ vfp_double_unpack(&vdm, vfp_get_double(state, dm));
+
+ vdd.sign = vdn.sign ^ vdm.sign;
+
+ tn = vfp_double_type(&vdn);
+ tm = vfp_double_type(&vdm);
+
+ /*
+ * Is n a NAN?
+ */
+ if (tn & VFP_NAN)
+ goto vdn_nan;
+
+ /*
+ * Is m a NAN?
+ */
+ if (tm & VFP_NAN)
+ goto vdm_nan;
+
+ /*
+ * If n and m are infinity, the result is invalid
+ * If n and m are zero, the result is invalid
+ */
+ if (tm & tn & (VFP_INFINITY|VFP_ZERO))
+ goto invalid;
+
+ /*
+ * If n is infinity, the result is infinity
+ */
+ if (tn & VFP_INFINITY)
+ goto infinity;
+
+ /*
+ * If m is zero, raise div0 exceptions
+ */
+ if (tm & VFP_ZERO)
+ goto divzero;
+
+ /*
+ * If m is infinity, or n is zero, the result is zero
+ */
+ if (tm & VFP_INFINITY || tn & VFP_ZERO)
+ goto zero;
+
+ if (tn & VFP_DENORMAL)
+ vfp_double_normalise_denormal(&vdn);
+ if (tm & VFP_DENORMAL)
+ vfp_double_normalise_denormal(&vdm);
+
+ /*
+ * Ok, we have two numbers, we can perform division.
+ */
+ vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1;
+ vdm.significand <<= 1;
+ if (vdm.significand <= (2 * vdn.significand)) {
+ vdn.significand >>= 1;
+ vdd.exponent++;
+ }
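+ /*
+ * The scaling above keeps the quotient estimate's leading bit
+ * in a fixed position, with the result exponent adjusted to
+ * compensate when the dividend is halved.
+ */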
+ vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand);
+ if ((vdd.significand & 0x1ff) <= 2) {
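+ /*
+ * The estimate may be a couple of ULPs high: recompute the
+ * exact remainder n - q*m and step q down until the remainder
+ * is non-negative, jamming any residue into the sticky bit.
+ */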
+ u64 termh, terml, remh, reml;
+ mul64to128(&termh, &terml, vdm.significand, vdd.significand);
+ sub128(&remh, &reml, vdn.significand, 0, termh, terml);
+ while ((s64)remh < 0) {
+ vdd.significand -= 1;
+ add128(&remh, &reml, remh, reml, 0, vdm.significand);
+ }
+ vdd.significand |= (reml != 0);
+ }
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fdiv");
+
+vdn_nan:
+ exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
+pack:
+ vfp_put_double(state, vfp_double_pack(&vdd), dd);
+ return exceptions;
+
+vdm_nan:
+ exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
+ goto pack;
+
+zero:
+ vdd.exponent = 0;
+ vdd.significand = 0;
+ goto pack;
+
+divzero:
+ exceptions = FPSCR_DZC;
+infinity:
+ vdd.exponent = 2047;
+ vdd.significand = 0;
+ goto pack;
+
+invalid:
+ vfp_put_double(state, vfp_double_pack(&vfp_double_default_qnan), dd);
+ return FPSCR_IOC;
}
static struct op fops[] = {
- { vfp_double_fmac, 0 },
- { vfp_double_fmsc, 0 },
- { vfp_double_fmul, 0 },
- { vfp_double_fadd, 0 },
- { vfp_double_fnmac, 0 },
- { vfp_double_fnmsc, 0 },
- { vfp_double_fnmul, 0 },
- { vfp_double_fsub, 0 },
- { vfp_double_fdiv, 0 },
+ { vfp_double_fmac, 0 },
+ { vfp_double_fmsc, 0 },
+ { vfp_double_fmul, 0 },
+ { vfp_double_fadd, 0 },
+ { vfp_double_fnmac, 0 },
+ { vfp_double_fnmsc, 0 },
+ { vfp_double_fnmul, 0 },
+ { vfp_double_fsub, 0 },
+ { vfp_double_fdiv, 0 },
};
#define FREG_BANK(x) ((x) & 0x0c)
@@ -1180,84 +1349,84 @@ static struct op fops[] = {
u32 vfp_double_cpdo(ARMul_State* state, u32 inst, u32 fpscr)
{
- u32 op = inst & FOP_MASK;
- u32 exceptions = 0;
- unsigned int dest;
- unsigned int dn = vfp_get_dn(inst);
- unsigned int dm;
- unsigned int vecitr, veclen, vecstride;
- struct op *fop;
-
- pr_debug("In %s\n", __FUNCTION__);
- vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK));
-
- fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
-
- /*
- * fcvtds takes an sN register number as destination, not dN.
- * It also always operates on scalars.
- */
- if (fop->flags & OP_SD)
- dest = vfp_get_sd(inst);
- else
- dest = vfp_get_dd(inst);
-
- /*
- * f[us]ito takes a sN operand, not a dN operand.
- */
- if (fop->flags & OP_SM)
- dm = vfp_get_sm(inst);
- else
- dm = vfp_get_dm(inst);
-
- /*
- * If destination bank is zero, vector length is always '1'.
- * ARM DDI0100F C5.1.3, C5.3.2.
- */
- if ((fop->flags & OP_SCALAR) || (FREG_BANK(dest) == 0))
- veclen = 0;
- else
- veclen = fpscr & FPSCR_LENGTH_MASK;
-
- pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
- (veclen >> FPSCR_LENGTH_BIT) + 1);
-
- if (!fop->fn) {
- printf("VFP: could not find double op %d\n", FEXT_TO_IDX(inst));
- goto invalid;
- }
-
- for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
- u32 except;
- char type;
-
- type = fop->flags & OP_SD ? 's' : 'd';
- if (op == FOP_EXT)
- pr_debug("VFP: itr%d (%c%u) = op[%u] (d%u)\n",
- vecitr >> FPSCR_LENGTH_BIT,
- type, dest, dn, dm);
- else
- pr_debug("VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)\n",
- vecitr >> FPSCR_LENGTH_BIT,
- type, dest, dn, FOP_TO_IDX(op), dm);
-
- except = fop->fn(state, dest, dn, dm, fpscr);
- pr_debug("VFP: itr%d: exceptions=%08x\n",
- vecitr >> FPSCR_LENGTH_BIT, except);
-
- exceptions |= except;
-
- /*
- * CHECK: It appears to be undefined whether we stop when
- * we encounter an exception. We continue.
- */
- dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3);
- dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 3);
- if (FREG_BANK(dm) != 0)
- dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 3);
- }
- return exceptions;
-
- invalid:
- return ~0;
+ u32 op = inst & FOP_MASK;
+ u32 exceptions = 0;
+ unsigned int dest;
+ unsigned int dn = vfp_get_dn(inst);
+ unsigned int dm;
+ unsigned int vecitr, veclen, vecstride;
+ struct op *fop;
+
+ pr_debug("In %s\n", __FUNCTION__);
+ vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK));
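+ /* A stride field of 0b11 selects a register stride of 2; any other encoding means 1. */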
+
+ fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
+
+ /*
+ * fcvtds takes an sN register number as destination, not dN.
+ * It also always operates on scalars.
+ */
+ if (fop->flags & OP_SD)
+ dest = vfp_get_sd(inst);
+ else
+ dest = vfp_get_dd(inst);
+
+ /*
+ * f[us]ito takes a sN operand, not a dN operand.
+ */
+ if (fop->flags & OP_SM)
+ dm = vfp_get_sm(inst);
+ else
+ dm = vfp_get_dm(inst);
+
+ /*
+ * If destination bank is zero, vector length is always '1'.
+ * ARM DDI0100F C5.1.3, C5.3.2.
+ */
+ if ((fop->flags & OP_SCALAR) || (FREG_BANK(dest) == 0))
+ veclen = 0;
+ else
+ veclen = fpscr & FPSCR_LENGTH_MASK;
+
+ pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
+ (veclen >> FPSCR_LENGTH_BIT) + 1);
+
+ if (!fop->fn) {
+ printf("VFP: could not find double op %d\n", FEXT_TO_IDX(inst));
+ goto invalid;
+ }
+
+ for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
+ u32 except;
+ char type;
+
+ type = fop->flags & OP_SD ? 's' : 'd';
+ if (op == FOP_EXT)
+ pr_debug("VFP: itr%d (%c%u) = op[%u] (d%u)\n",
+ vecitr >> FPSCR_LENGTH_BIT,
+ type, dest, dn, dm);
+ else
+ pr_debug("VFP: itr%d (%c%u) = (d%u) op[%u] (d%u)\n",
+ vecitr >> FPSCR_LENGTH_BIT,
+ type, dest, dn, FOP_TO_IDX(op), dm);
+
+ except = fop->fn(state, dest, dn, dm, fpscr);
+ pr_debug("VFP: itr%d: exceptions=%08x\n",
+ vecitr >> FPSCR_LENGTH_BIT, except);
+
+ exceptions |= except;
+
+ /*
+ * CHECK: It appears to be undefined whether we stop when
+ * we encounter an exception. We continue.
+ */
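+ /*
+ * Step each vector operand within its four-register bank,
+ * wrapping modulo 4; a scalar 'm' operand (bank 0) is left
+ * unchanged.
+ */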
+ dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 3);
+ dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 3);
+ if (FREG_BANK(dm) != 0)
+ dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 3);
+ }
+ return exceptions;
+
+invalid:
+ return ~0;
}
diff --git a/src/core/arm/skyeye_common/vfp/vfpsingle.cpp b/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
index 8bcbd4fe..07d0c1f4 100644
--- a/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
@@ -56,167 +56,167 @@
#include "core/arm/skyeye_common/vfp/vfp.h"
static struct vfp_single vfp_single_default_qnan = {
- //.exponent = 255,
- //.sign = 0,
- //.significand = VFP_SINGLE_SIGNIFICAND_QNAN,
+ 255,
+ 0,
+ VFP_SINGLE_SIGNIFICAND_QNAN,
};
static void vfp_single_dump(const char *str, struct vfp_single *s)
{
- pr_debug("VFP: %s: sign=%d exponent=%d significand=%08x\n",
- str, s->sign != 0, s->exponent, s->significand);
+ pr_debug("VFP: %s: sign=%d exponent=%d significand=%08x\n",
+ str, s->sign != 0, s->exponent, s->significand);
}
static void vfp_single_normalise_denormal(struct vfp_single *vs)
{
- int bits = 31 - vfp_fls(vs->significand);
+ int bits = 31 - fls(vs->significand);
- vfp_single_dump("normalise_denormal: in", vs);
+ vfp_single_dump("normalise_denormal: in", vs);
- if (bits) {
- vs->exponent -= bits - 1;
- vs->significand <<= bits;
- }
+ if (bits) {
+ vs->exponent -= bits - 1;
+ vs->significand <<= bits;
+ }
- vfp_single_dump("normalise_denormal: out", vs);
+ vfp_single_dump("normalise_denormal: out", vs);
}
u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func)
{
- u32 significand, incr, rmode;
- int exponent, shift, underflow;
-
- vfp_single_dump("pack: in", vs);
-
- /*
- * Infinities and NaNs are a special case.
- */
- if (vs->exponent == 255 && (vs->significand == 0 || exceptions))
- goto pack;
-
- /*
- * Special-case zero.
- */
- if (vs->significand == 0) {
- vs->exponent = 0;
- goto pack;
- }
-
- exponent = vs->exponent;
- significand = vs->significand;
-
- /*
- * Normalise first. Note that we shift the significand up to
- * bit 31, so we have VFP_SINGLE_LOW_BITS + 1 below the least
- * significant bit.
- */
- shift = 32 - vfp_fls(significand);
- if (shift < 32 && shift) {
- exponent -= shift;
- significand <<= shift;
- }
+ u32 significand, incr, rmode;
+ int exponent, shift, underflow;
+
+ vfp_single_dump("pack: in", vs);
+
+ /*
+ * Infinities and NaNs are a special case.
+ */
+ if (vs->exponent == 255 && (vs->significand == 0 || exceptions))
+ goto pack;
+
+ /*
+ * Special-case zero.
+ */
+ if (vs->significand == 0) {
+ vs->exponent = 0;
+ goto pack;
+ }
+
+ exponent = vs->exponent;
+ significand = vs->significand;
+
+ /*
+ * Normalise first. Note that we shift the significand up to
+ * bit 31, so we have VFP_SINGLE_LOW_BITS + 1 below the least
+ * significant bit.
+ */
+ shift = 32 - fls(significand);
+ if (shift < 32 && shift) {
+ exponent -= shift;
+ significand <<= shift;
+ }
#if 1
- vs->exponent = exponent;
- vs->significand = significand;
- vfp_single_dump("pack: normalised", vs);
+ vs->exponent = exponent;
+ vs->significand = significand;
+ vfp_single_dump("pack: normalised", vs);
#endif
- /*
- * Tiny number?
- */
- underflow = exponent < 0;
- if (underflow) {
- significand = vfp_shiftright32jamming(significand, -exponent);
- exponent = 0;
+ /*
+ * Tiny number?
+ */
+ underflow = exponent < 0;
+ if (underflow) {
+ significand = vfp_shiftright32jamming(significand, -exponent);
+ exponent = 0;
#if 1
- vs->exponent = exponent;
- vs->significand = significand;
- vfp_single_dump("pack: tiny number", vs);
+ vs->exponent = exponent;
+ vs->significand = significand;
+ vfp_single_dump("pack: tiny number", vs);
#endif
- if (!(significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1)))
- underflow = 0;
- }
-
- /*
- * Select rounding increment.
- */
- incr = 0;
- rmode = fpscr & FPSCR_RMODE_MASK;
-
- if (rmode == FPSCR_ROUND_NEAREST) {
- incr = 1 << VFP_SINGLE_LOW_BITS;
- if ((significand & (1 << (VFP_SINGLE_LOW_BITS + 1))) == 0)
- incr -= 1;
- } else if (rmode == FPSCR_ROUND_TOZERO) {
- incr = 0;
- } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vs->sign != 0))
- incr = (1 << (VFP_SINGLE_LOW_BITS + 1)) - 1;
-
- pr_debug("VFP: rounding increment = 0x%08x\n", incr);
-
- /*
- * Is our rounding going to overflow?
- */
- if ((significand + incr) < significand) {
- exponent += 1;
- significand = (significand >> 1) | (significand & 1);
- incr >>= 1;
+ if (!(significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1)))
+ underflow = 0;
+ }
+
+ /*
+ * Select rounding increment.
+ */
+ incr = 0;
+ rmode = fpscr & FPSCR_RMODE_MASK;
+
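+ /*
+ * Round-to-nearest-even: the increment is half a result ULP.
+ * When the bit that becomes the result's LSB is already clear,
+ * the increment is reduced by one so exact ties round to even.
+ */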
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 1 << VFP_SINGLE_LOW_BITS;
+ if ((significand & (1 << (VFP_SINGLE_LOW_BITS + 1))) == 0)
+ incr -= 1;
+ } else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vs->sign != 0))
+ incr = (1 << (VFP_SINGLE_LOW_BITS + 1)) - 1;
+
+ pr_debug("VFP: rounding increment = 0x%08x\n", incr);
+
+ /*
+ * Is our rounding going to overflow?
+ */
+ if ((significand + incr) < significand) {
+ exponent += 1;
+ significand = (significand >> 1) | (significand & 1);
+ incr >>= 1;
#if 1
- vs->exponent = exponent;
- vs->significand = significand;
- vfp_single_dump("pack: overflow", vs);
+ vs->exponent = exponent;
+ vs->significand = significand;
+ vfp_single_dump("pack: overflow", vs);
#endif
- }
-
- /*
- * If any of the low bits (which will be shifted out of the
- * number) are non-zero, the result is inexact.
- */
- if (significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1))
- exceptions |= FPSCR_IXC;
-
- /*
- * Do our rounding.
- */
- significand += incr;
-
- /*
- * Infinity?
- */
- if (exponent >= 254) {
- exceptions |= FPSCR_OFC | FPSCR_IXC;
- if (incr == 0) {
- vs->exponent = 253;
- vs->significand = 0x7fffffff;
- } else {
- vs->exponent = 255; /* infinity */
- vs->significand = 0;
- }
- } else {
- if (significand >> (VFP_SINGLE_LOW_BITS + 1) == 0)
- exponent = 0;
- if (exponent || significand > 0x80000000)
- underflow = 0;
- if (underflow)
- exceptions |= FPSCR_UFC;
- vs->exponent = exponent;
- vs->significand = significand >> 1;
- }
-
- pack:
- vfp_single_dump("pack: final", vs);
- {
- s32 d = vfp_single_pack(vs);
+ }
+
+ /*
+ * If any of the low bits (which will be shifted out of the
+ * number) are non-zero, the result is inexact.
+ */
+ if (significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1))
+ exceptions |= FPSCR_IXC;
+
+ /*
+ * Do our rounding.
+ */
+ significand += incr;
+
+ /*
+ * Infinity?
+ */
+ if (exponent >= 254) {
+ exceptions |= FPSCR_OFC | FPSCR_IXC;
+ if (incr == 0) {
+ vs->exponent = 253;
+ vs->significand = 0x7fffffff;
+ } else {
+ vs->exponent = 255; /* infinity */
+ vs->significand = 0;
+ }
+ } else {
+ if (significand >> (VFP_SINGLE_LOW_BITS + 1) == 0)
+ exponent = 0;
+ if (exponent || significand > 0x80000000)
+ underflow = 0;
+ if (underflow)
+ exceptions |= FPSCR_UFC;
+ vs->exponent = exponent;
+ vs->significand = significand >> 1;
+ }
+
+pack:
+ vfp_single_dump("pack: final", vs);
+ {
+ s32 d = vfp_single_pack(vs);
#if 1
- pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func,
- sd, d, exceptions);
+ pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func,
+ sd, d, exceptions);
#endif
- vfp_put_float(state, d, sd);
- }
+ vfp_put_float(state, d, sd);
+ }
- return exceptions;
+ return exceptions;
}
/*
@@ -225,43 +225,43 @@ u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs,
*/
static u32
vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
- struct vfp_single *vsm, u32 fpscr)
+ struct vfp_single *vsm, u32 fpscr)
{
- struct vfp_single *nan;
- int tn, tm = 0;
-
- tn = vfp_single_type(vsn);
-
- if (vsm)
- tm = vfp_single_type(vsm);
-
- if (fpscr & FPSCR_DEFAULT_NAN)
- /*
- * Default NaN mode - always returns a quiet NaN
- */
- nan = &vfp_single_default_qnan;
- else {
- /*
- * Contemporary mode - select the first signalling
- * NAN, or if neither are signalling, the first
- * quiet NAN.
- */
- if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN))
- nan = vsn;
- else
- nan = vsm;
- /*
- * Make the NaN quiet.
- */
- nan->significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
- }
-
- *vsd = *nan;
-
- /*
- * If one was a signalling NAN, raise invalid operation.
- */
- return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG;
+ struct vfp_single *nan;
+ int tn, tm = 0;
+
+ tn = vfp_single_type(vsn);
+
+ if (vsm)
+ tm = vfp_single_type(vsm);
+
+ if (fpscr & FPSCR_DEFAULT_NAN)
+ /*
+ * Default NaN mode - always returns a quiet NaN
+ */
+ nan = &vfp_single_default_qnan;
+ else {
+ /*
+ * Contemporary mode - select the first signalling
+ * NAN, or if neither are signalling, the first
+ * quiet NAN.
+ */
+ if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN))
+ nan = vsn;
+ else
+ nan = vsm;
+ /*
+ * Make the NaN quiet.
+ */
+ nan->significand |= VFP_SINGLE_SIGNIFICAND_QNAN;
+ }
+
+ *vsd = *nan;
+
+ /*
+ * If one was a signalling NAN, raise invalid operation.
+ */
+ return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : VFP_NAN_FLAG;
}
@@ -270,140 +270,140 @@ vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
*/
static u32 vfp_single_fabs(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- vfp_put_float(state, vfp_single_packed_abs(m), sd);
- return 0;
+ vfp_put_float(state, vfp_single_packed_abs(m), sd);
+ return 0;
}
static u32 vfp_single_fcpy(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- vfp_put_float(state, m, sd);
- return 0;
+ vfp_put_float(state, m, sd);
+ return 0;
}
static u32 vfp_single_fneg(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- vfp_put_float(state, vfp_single_packed_negate(m), sd);
- return 0;
+ vfp_put_float(state, vfp_single_packed_negate(m), sd);
+ return 0;
}
static const u16 sqrt_oddadjust[] = {
- 0x0004, 0x0022, 0x005d, 0x00b1, 0x011d, 0x019f, 0x0236, 0x02e0,
- 0x039c, 0x0468, 0x0545, 0x0631, 0x072b, 0x0832, 0x0946, 0x0a67
+ 0x0004, 0x0022, 0x005d, 0x00b1, 0x011d, 0x019f, 0x0236, 0x02e0,
+ 0x039c, 0x0468, 0x0545, 0x0631, 0x072b, 0x0832, 0x0946, 0x0a67
};
static const u16 sqrt_evenadjust[] = {
- 0x0a2d, 0x08af, 0x075a, 0x0629, 0x051a, 0x0429, 0x0356, 0x029e,
- 0x0200, 0x0179, 0x0109, 0x00af, 0x0068, 0x0034, 0x0012, 0x0002
+ 0x0a2d, 0x08af, 0x075a, 0x0629, 0x051a, 0x0429, 0x0356, 0x029e,
+ 0x0200, 0x0179, 0x0109, 0x00af, 0x0068, 0x0034, 0x0012, 0x0002
};
u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand)
{
- int index;
- u32 z, a;
-
- if ((significand & 0xc0000000) != 0x40000000) {
- pr_debug("VFP: estimate_sqrt: invalid significand\n");
- }
-
- a = significand << 1;
- index = (a >> 27) & 15;
- if (exponent & 1) {
- z = 0x4000 + (a >> 17) - sqrt_oddadjust[index];
- z = ((a / z) << 14) + (z << 15);
- a >>= 1;
- } else {
- z = 0x8000 + (a >> 17) - sqrt_evenadjust[index];
- z = a / z + z;
- z = (z >= 0x20000) ? 0xffff8000 : (z << 15);
- if (z <= a)
- return (s32)a >> 1;
- }
- {
- u64 v = (u64)a << 31;
- do_div(v, z);
- return v + (z >> 1);
- }
+ int index;
+ u32 z, a;
+
+ if ((significand & 0xc0000000) != 0x40000000) {
+ pr_debug("VFP: estimate_sqrt: invalid significand\n");
+ }
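+ /*
+ * Table-driven initial estimate (it appears to follow
+ * SoftFloat's estimateSqrt32), with separate adjustment tables
+ * for odd and even exponents, refined by a Newton-style step.
+ */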
+
+ a = significand << 1;
+ index = (a >> 27) & 15;
+ if (exponent & 1) {
+ z = 0x4000 + (a >> 17) - sqrt_oddadjust[index];
+ z = ((a / z) << 14) + (z << 15);
+ a >>= 1;
+ } else {
+ z = 0x8000 + (a >> 17) - sqrt_evenadjust[index];
+ z = a / z + z;
+ z = (z >= 0x20000) ? 0xffff8000 : (z << 15);
+ if (z <= a)
+ return (s32)a >> 1;
+ }
+ {
+ u64 v = (u64)a << 31;
+ do_div(v, z);
+ return (u32)(v + (z >> 1));
+ }
}
static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- struct vfp_single vsm, vsd, *vsp;
- int ret, tm;
-
- vfp_single_unpack(&vsm, m);
- tm = vfp_single_type(&vsm);
- if (tm & (VFP_NAN|VFP_INFINITY)) {
- vsp = &vsd;
-
- if (tm & VFP_NAN)
- ret = vfp_propagate_nan(vsp, &vsm, NULL, fpscr);
- else if (vsm.sign == 0) {
- sqrt_copy:
- vsp = &vsm;
- ret = 0;
- } else {
- sqrt_invalid:
- vsp = &vfp_single_default_qnan;
- ret = FPSCR_IOC;
- }
- vfp_put_float(state, vfp_single_pack(vsp), sd);
- return ret;
- }
-
- /*
- * sqrt(+/- 0) == +/- 0
- */
- if (tm & VFP_ZERO)
- goto sqrt_copy;
-
- /*
- * Normalise a denormalised number
- */
- if (tm & VFP_DENORMAL)
- vfp_single_normalise_denormal(&vsm);
-
- /*
- * sqrt(<0) = invalid
- */
- if (vsm.sign)
- goto sqrt_invalid;
-
- vfp_single_dump("sqrt", &vsm);
-
- /*
- * Estimate the square root.
- */
- vsd.sign = 0;
- vsd.exponent = ((vsm.exponent - 127) >> 1) + 127;
- vsd.significand = vfp_estimate_sqrt_significand(vsm.exponent, vsm.significand) + 2;
-
- vfp_single_dump("sqrt estimate", &vsd);
-
- /*
- * And now adjust.
- */
- if ((vsd.significand & VFP_SINGLE_LOW_BITS_MASK) <= 5) {
- if (vsd.significand < 2) {
- vsd.significand = 0xffffffff;
- } else {
- u64 term;
- s64 rem;
- vsm.significand <<= !(vsm.exponent & 1);
- term = (u64)vsd.significand * vsd.significand;
- rem = ((u64)vsm.significand << 32) - term;
-
- pr_debug("VFP: term=%016llx rem=%016llx\n", term, rem);
-
- while (rem < 0) {
- vsd.significand -= 1;
- rem += ((u64)vsd.significand << 1) | 1;
- }
- vsd.significand |= rem != 0;
- }
- }
- vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
-
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+ struct vfp_single vsm, vsd, *vsp;
+ int ret, tm;
+
+ vfp_single_unpack(&vsm, m);
+ tm = vfp_single_type(&vsm);
+ if (tm & (VFP_NAN|VFP_INFINITY)) {
+ vsp = &vsd;
+
+ if (tm & VFP_NAN)
+ ret = vfp_propagate_nan(vsp, &vsm, NULL, fpscr);
+ else if (vsm.sign == 0) {
+sqrt_copy:
+ vsp = &vsm;
+ ret = 0;
+ } else {
+sqrt_invalid:
+ vsp = &vfp_single_default_qnan;
+ ret = FPSCR_IOC;
+ }
+ vfp_put_float(state, vfp_single_pack(vsp), sd);
+ return ret;
+ }
+
+ /*
+ * sqrt(+/- 0) == +/- 0
+ */
+ if (tm & VFP_ZERO)
+ goto sqrt_copy;
+
+ /*
+ * Normalise a denormalised number
+ */
+ if (tm & VFP_DENORMAL)
+ vfp_single_normalise_denormal(&vsm);
+
+ /*
+ * sqrt(<0) = invalid
+ */
+ if (vsm.sign)
+ goto sqrt_invalid;
+
+ vfp_single_dump("sqrt", &vsm);
+
+ /*
+ * Estimate the square root.
+ */
+ vsd.sign = 0;
+ vsd.exponent = ((vsm.exponent - 127) >> 1) + 127;
+ vsd.significand = vfp_estimate_sqrt_significand(vsm.exponent, vsm.significand) + 2;
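+ /*
+ * Halve the unbiased exponent and re-bias it; the +2 appears
+ * intended to keep the estimate slightly high, so the
+ * adjustment below only ever steps the significand downwards.
+ */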
+
+ vfp_single_dump("sqrt estimate", &vsd);
+
+ /*
+ * And now adjust.
+ */
+ if ((vsd.significand & VFP_SINGLE_LOW_BITS_MASK) <= 5) {
+ if (vsd.significand < 2) {
+ vsd.significand = 0xffffffff;
+ } else {
+ u64 term;
+ s64 rem;
+ vsm.significand <<= !(vsm.exponent & 1);
+ term = (u64)vsd.significand * vsd.significand;
+ rem = ((u64)vsm.significand << 32) - term;
+
+ pr_debug("VFP: term=%016llx rem=%016llx\n", term, rem);
+
+ while (rem < 0) {
+ vsd.significand -= 1;
+ rem += ((u64)vsd.significand << 1) | 1;
+ }
+ vsd.significand |= rem != 0;
+ }
+ }
+ vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
+
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
}
/*
@@ -414,305 +414,346 @@ static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 f
*/
static u32 vfp_compare(ARMul_State* state, int sd, int signal_on_qnan, s32 m, u32 fpscr)
{
- s32 d;
- u32 ret = 0;
-
- d = vfp_get_float(state, sd);
- if (vfp_single_packed_exponent(m) == 255 && vfp_single_packed_mantissa(m)) {
- ret |= FPSCR_C | FPSCR_V;
- if (signal_on_qnan || !(vfp_single_packed_mantissa(m) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1))))
- /*
- * Signalling NaN, or signalling on quiet NaN
- */
- ret |= FPSCR_IOC;
- }
-
- if (vfp_single_packed_exponent(d) == 255 && vfp_single_packed_mantissa(d)) {
- ret |= FPSCR_C | FPSCR_V;
- if (signal_on_qnan || !(vfp_single_packed_mantissa(d) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1))))
- /*
- * Signalling NaN, or signalling on quiet NaN
- */
- ret |= FPSCR_IOC;
- }
-
- if (ret == 0) {
- if (d == m || vfp_single_packed_abs(d | m) == 0) {
- /*
- * equal
- */
- ret |= FPSCR_Z | FPSCR_C;
- } else if (vfp_single_packed_sign(d ^ m)) {
- /*
- * different signs
- */
- if (vfp_single_packed_sign(d))
- /*
- * d is negative, so d < m
- */
- ret |= FPSCR_N;
- else
- /*
- * d is positive, so d > m
- */
- ret |= FPSCR_C;
- } else if ((vfp_single_packed_sign(d) != 0) ^ (d < m)) {
- /*
- * d < m
- */
- ret |= FPSCR_N;
- } else if ((vfp_single_packed_sign(d) != 0) ^ (d > m)) {
- /*
- * d > m
- */
- ret |= FPSCR_C;
- }
- }
- return ret;
+ s32 d;
+ u32 ret = 0;
+
+ d = vfp_get_float(state, sd);
+ if (vfp_single_packed_exponent(m) == 255 && vfp_single_packed_mantissa(m)) {
+ ret |= FPSCR_C | FPSCR_V;
+ if (signal_on_qnan || !(vfp_single_packed_mantissa(m) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1))))
+ /*
+ * Signalling NaN, or signalling on quiet NaN
+ */
+ ret |= FPSCR_IOC;
+ }
+
+ if (vfp_single_packed_exponent(d) == 255 && vfp_single_packed_mantissa(d)) {
+ ret |= FPSCR_C | FPSCR_V;
+ if (signal_on_qnan || !(vfp_single_packed_mantissa(d) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1))))
+ /*
+ * Signalling NaN, or signalling on quiet NaN
+ */
+ ret |= FPSCR_IOC;
+ }
+
+ if (ret == 0) {
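+ /*
+ * Neither operand is a NaN: compare the packed bit patterns.
+ * For finite values the signed integer ordering matches the
+ * IEEE ordering, inverted when both operands are negative.
+ */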
+ if (d == m || vfp_single_packed_abs(d | m) == 0) {
+ /*
+ * equal
+ */
+ ret |= FPSCR_Z | FPSCR_C;
+ } else if (vfp_single_packed_sign(d ^ m)) {
+ /*
+ * different signs
+ */
+ if (vfp_single_packed_sign(d))
+ /*
+ * d is negative, so d < m
+ */
+ ret |= FPSCR_N;
+ else
+ /*
+ * d is positive, so d > m
+ */
+ ret |= FPSCR_C;
+ } else if ((vfp_single_packed_sign(d) != 0) ^ (d < m)) {
+ /*
+ * d < m
+ */
+ ret |= FPSCR_N;
+ } else if ((vfp_single_packed_sign(d) != 0) ^ (d > m)) {
+ /*
+ * d > m
+ */
+ ret |= FPSCR_C;
+ }
+ }
+ return ret;
}
static u32 vfp_single_fcmp(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- return vfp_compare(state, sd, 0, m, fpscr);
+ return vfp_compare(state, sd, 0, m, fpscr);
}
static u32 vfp_single_fcmpe(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- return vfp_compare(state, sd, 1, m, fpscr);
+ return vfp_compare(state, sd, 1, m, fpscr);
}
static u32 vfp_single_fcmpz(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- return vfp_compare(state, sd, 0, 0, fpscr);
+ return vfp_compare(state, sd, 0, 0, fpscr);
}
static u32 vfp_single_fcmpez(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- return vfp_compare(state, sd, 1, 0, fpscr);
+ return vfp_compare(state, sd, 1, 0, fpscr);
+}
+
+static s64 vfp_single_to_doubleintern(ARMul_State* state, s32 m, u32 fpscr) /* ichfly: for internal use only */
+{
+ struct vfp_single vsm;
+ struct vfp_double vdd;
+ int tm;
+ u32 exceptions = 0;
+
+ vfp_single_unpack(&vsm, m);
+
+ tm = vfp_single_type(&vsm);
+
+ /*
+ * If we have a signalling NaN, signal invalid operation.
+ */
+ if (tm == VFP_SNAN)
+ exceptions = FPSCR_IOC;
+
+ if (tm & VFP_DENORMAL)
+ vfp_single_normalise_denormal(&vsm);
+
+ vdd.sign = vsm.sign;
+ vdd.significand = (u64)vsm.significand << 32;
+
+ /*
+ * If we have an infinity or NaN, the exponent must be 2047.
+ */
+ if (tm & (VFP_INFINITY | VFP_NAN)) {
+ vdd.exponent = 2047;
+ if (tm == VFP_QNAN)
+ vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
+ goto pack_nan;
+ }
+ else if (tm & VFP_ZERO)
+ vdd.exponent = 0;
+ else
+ vdd.exponent = vsm.exponent + (1023 - 127);
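+
+ /* All paths fall through here; only the NaN/infinity case jumps directly. */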
+pack_nan:
+ vfp_double_normaliseroundintern(state, &vdd, fpscr, exceptions, "fcvtd");
+ return vfp_double_pack(&vdd);
}
static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 fpscr)
{
- struct vfp_single vsm;
- struct vfp_double vdd;
- int tm;
- u32 exceptions = 0;
-
- vfp_single_unpack(&vsm, m);
-
- tm = vfp_single_type(&vsm);
-
- /*
- * If we have a signalling NaN, signal invalid operation.
- */
- if (tm == VFP_SNAN)
- exceptions = FPSCR_IOC;
-
- if (tm & VFP_DENORMAL)
- vfp_single_normalise_denormal(&vsm);
-
- vdd.sign = vsm.sign;
- vdd.significand = (u64)vsm.significand << 32;
-
- /*
- * If we have an infinity or NaN, the exponent must be 2047.
- */
- if (tm & (VFP_INFINITY|VFP_NAN)) {
- vdd.exponent = 2047;
- if (tm == VFP_QNAN)
- vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
- goto pack_nan;
- } else if (tm & VFP_ZERO)
- vdd.exponent = 0;
- else
- vdd.exponent = vsm.exponent + (1023 - 127);
-
- return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fcvtd");
-
- pack_nan:
- vfp_put_double(state, vfp_double_pack(&vdd), dd);
- return exceptions;
+ struct vfp_single vsm;
+ struct vfp_double vdd;
+ int tm;
+ u32 exceptions = 0;
+
+ vfp_single_unpack(&vsm, m);
+
+ tm = vfp_single_type(&vsm);
+
+ /*
+ * If we have a signalling NaN, signal invalid operation.
+ */
+ if (tm == VFP_SNAN)
+ exceptions = FPSCR_IOC;
+
+ if (tm & VFP_DENORMAL)
+ vfp_single_normalise_denormal(&vsm);
+
+ vdd.sign = vsm.sign;
+ vdd.significand = (u64)vsm.significand << 32;
+
+ /*
+ * If we have an infinity or NaN, the exponent must be 2047.
+ */
+ if (tm & (VFP_INFINITY|VFP_NAN)) {
+ vdd.exponent = 2047;
+ if (tm == VFP_QNAN)
+ vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN;
+ goto pack_nan;
+ } else if (tm & VFP_ZERO)
+ vdd.exponent = 0;
+ else
+ vdd.exponent = vsm.exponent + (1023 - 127);
+
+ return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fcvtd");
+
+pack_nan:
+ vfp_put_double(state, vfp_double_pack(&vdd), dd);
+ return exceptions;
}
static u32 vfp_single_fuito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- struct vfp_single vs;
+ struct vfp_single vs;
- vs.sign = 0;
- vs.exponent = 127 + 31 - 1;
- vs.significand = (u32)m;
+ vs.sign = 0;
+ vs.exponent = 127 + 31 - 1;
+ vs.significand = (u32)m;
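+
+ /*
+ * The integer sits unnormalised in the significand; the preset
+ * exponent lets vfp_single_normaliseround shift and round it
+ * into a correctly scaled float.
+ */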
- return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fuito");
+ return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fuito");
}
static u32 vfp_single_fsito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- struct vfp_single vs;
+ struct vfp_single vs;
- vs.sign = (m & 0x80000000) >> 16;
- vs.exponent = 127 + 31 - 1;
- vs.significand = vs.sign ? -m : m;
+ vs.sign = (m & 0x80000000) >> 16;
+ vs.exponent = 127 + 31 - 1;
+ vs.significand = vs.sign ? (0 - (u32)m) : (u32)m;
- return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fsito");
+ return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fsito");
}
static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- struct vfp_single vsm;
- u32 d, exceptions = 0;
- int rmode = fpscr & FPSCR_RMODE_MASK;
- int tm;
-
- vfp_single_unpack(&vsm, m);
- vfp_single_dump("VSM", &vsm);
-
- /*
- * Do we have a denormalised number?
- */
- tm = vfp_single_type(&vsm);
- if (tm & VFP_DENORMAL)
- exceptions |= FPSCR_IDC;
-
- if (tm & VFP_NAN)
- vsm.sign = 0;
-
- if (vsm.exponent >= 127 + 32) {
- d = vsm.sign ? 0 : 0xffffffff;
- exceptions = FPSCR_IOC;
- } else if (vsm.exponent >= 127 - 1) {
- int shift = 127 + 31 - vsm.exponent;
- u32 rem, incr = 0;
-
- /*
- * 2^0 <= m < 2^32-2^8
- */
- d = (vsm.significand << 1) >> shift;
- rem = vsm.significand << (33 - shift);
-
- if (rmode == FPSCR_ROUND_NEAREST) {
- incr = 0x80000000;
- if ((d & 1) == 0)
- incr -= 1;
- } else if (rmode == FPSCR_ROUND_TOZERO) {
- incr = 0;
- } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) {
- incr = ~0;
- }
-
- if ((rem + incr) < rem) {
- if (d < 0xffffffff)
- d += 1;
- else
- exceptions |= FPSCR_IOC;
- }
-
- if (d && vsm.sign) {
- d = 0;
- exceptions |= FPSCR_IOC;
- } else if (rem)
- exceptions |= FPSCR_IXC;
- } else {
- d = 0;
- if (vsm.exponent | vsm.significand) {
- exceptions |= FPSCR_IXC;
- if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
- d = 1;
- else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) {
- d = 0;
- exceptions |= FPSCR_IOC;
- }
- }
- }
-
- pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
-
- vfp_put_float(state, d, sd);
-
- return exceptions;
+ struct vfp_single vsm;
+ u32 d, exceptions = 0;
+ int rmode = fpscr & FPSCR_RMODE_MASK;
+ int tm;
+
+ vfp_single_unpack(&vsm, m);
+ vfp_single_dump("VSM", &vsm);
+
+ /*
+ * Do we have a denormalised number?
+ */
+ tm = vfp_single_type(&vsm);
+ if (tm & VFP_DENORMAL)
+ exceptions |= FPSCR_IDC;
+
+ if (tm & VFP_NAN)
+ vsm.sign = 0;
+
+ if (vsm.exponent >= 127 + 32) {
+ d = vsm.sign ? 0 : 0xffffffff;
+ exceptions = FPSCR_IOC;
+ } else if (vsm.exponent >= 127 - 1) {
+ int shift = 127 + 31 - vsm.exponent;
+ u32 rem, incr = 0;
+
+ /*
+ * 2^0 <= m < 2^32-2^8
+ */
+ d = (vsm.significand << 1) >> shift;
+ rem = vsm.significand << (33 - shift);
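+ /*
+ * 'd' is the integer part of the significand for this
+ * exponent; 'rem' holds the shifted-out fraction that drives
+ * the rounding below.
+ */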
+
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 0x80000000;
+ if ((d & 1) == 0)
+ incr -= 1;
+ } else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) {
+ incr = ~0;
+ }
+
+ if ((rem + incr) < rem) {
+ if (d < 0xffffffff)
+ d += 1;
+ else
+ exceptions |= FPSCR_IOC;
+ }
+
+ if (d && vsm.sign) {
+ d = 0;
+ exceptions |= FPSCR_IOC;
+ } else if (rem)
+ exceptions |= FPSCR_IXC;
+ } else {
+ d = 0;
+ if (vsm.exponent | vsm.significand) {
+ exceptions |= FPSCR_IXC;
+ if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
+ d = 1;
+ else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) {
+ d = 0;
+ exceptions |= FPSCR_IOC;
+ }
+ }
+ }
+
+ pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
+
+ vfp_put_float(state, d, sd);
+
+ return exceptions;
}
static u32 vfp_single_ftouiz(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- return vfp_single_ftoui(state, sd, unused, m, FPSCR_ROUND_TOZERO);
+ return vfp_single_ftoui(state, sd, unused, m, FPSCR_ROUND_TOZERO);
}
static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- struct vfp_single vsm;
- u32 d, exceptions = 0;
- int rmode = fpscr & FPSCR_RMODE_MASK;
- int tm;
-
- vfp_single_unpack(&vsm, m);
- vfp_single_dump("VSM", &vsm);
-
- /*
- * Do we have a denormalised number?
- */
- tm = vfp_single_type(&vsm);
- if (vfp_single_type(&vsm) & VFP_DENORMAL)
- exceptions |= FPSCR_IDC;
-
- if (tm & VFP_NAN) {
- d = 0;
- exceptions |= FPSCR_IOC;
- } else if (vsm.exponent >= 127 + 32) {
- /*
- * m >= 2^31-2^7: invalid
- */
- d = 0x7fffffff;
- if (vsm.sign)
- d = ~d;
- exceptions |= FPSCR_IOC;
- } else if (vsm.exponent >= 127 - 1) {
- int shift = 127 + 31 - vsm.exponent;
- u32 rem, incr = 0;
-
- /* 2^0 <= m <= 2^31-2^7 */
- d = (vsm.significand << 1) >> shift;
- rem = vsm.significand << (33 - shift);
-
- if (rmode == FPSCR_ROUND_NEAREST) {
- incr = 0x80000000;
- if ((d & 1) == 0)
- incr -= 1;
- } else if (rmode == FPSCR_ROUND_TOZERO) {
- incr = 0;
- } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) {
- incr = ~0;
- }
-
- if ((rem + incr) < rem && d < 0xffffffff)
- d += 1;
- if (d > 0x7fffffff + (vsm.sign != 0)) {
- d = 0x7fffffff + (vsm.sign != 0);
- exceptions |= FPSCR_IOC;
- } else if (rem)
- exceptions |= FPSCR_IXC;
-
- if (vsm.sign)
- d = -d;
- } else {
- d = 0;
- if (vsm.exponent | vsm.significand) {
- exceptions |= FPSCR_IXC;
- if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
- d = 1;
- else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign)
- d = -1;
- }
- }
-
- pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
-
- vfp_put_float(state, (s32)d, sd);
-
- return exceptions;
+ struct vfp_single vsm;
+ u32 d, exceptions = 0;
+ int rmode = fpscr & FPSCR_RMODE_MASK;
+ int tm;
+
+ vfp_single_unpack(&vsm, m);
+ vfp_single_dump("VSM", &vsm);
+
+ /*
+ * Do we have a denormalised number?
+ */
+ tm = vfp_single_type(&vsm);
+ if (tm & VFP_DENORMAL)
+ exceptions |= FPSCR_IDC;
+
+ if (tm & VFP_NAN) {
+ d = 0;
+ exceptions |= FPSCR_IOC;
+ } else if (vsm.exponent >= 127 + 32) {
+ /*
+ * m >= 2^31-2^7: invalid
+ */
+ d = 0x7fffffff;
+ if (vsm.sign)
+ d = ~d;
+ exceptions |= FPSCR_IOC;
+ } else if (vsm.exponent >= 127 - 1) {
+ int shift = 127 + 31 - vsm.exponent;
+ u32 rem, incr = 0;
+
+ /* 2^0 <= m <= 2^31-2^7 */
+ d = (vsm.significand << 1) >> shift;
+ rem = vsm.significand << (33 - shift);
+
+ if (rmode == FPSCR_ROUND_NEAREST) {
+ incr = 0x80000000;
+ if ((d & 1) == 0)
+ incr -= 1;
+ } else if (rmode == FPSCR_ROUND_TOZERO) {
+ incr = 0;
+ } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) {
+ incr = ~0;
+ }
+
+ if ((rem + incr) < rem && d < 0xffffffff)
+ d += 1;
+ if (d > (0x7fffffffu + (vsm.sign != 0))) {
+ d = (0x7fffffffu + (vsm.sign != 0));
+ exceptions |= FPSCR_IOC;
+ } else if (rem)
+ exceptions |= FPSCR_IXC;
+
+ if (vsm.sign)
+ d = 0 - d;
+ } else {
+ d = 0;
+ if (vsm.exponent | vsm.significand) {
+ exceptions |= FPSCR_IXC;
+ if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0)
+ d = 1;
+ else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign)
+ d = -1;
+ }
+ }
+
+ pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions);
+
+ vfp_put_float(state, (s32)d, sd);
+
+ return exceptions;
}
static u32 vfp_single_ftosiz(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
{
- return vfp_single_ftosi(state, sd, unused, m, FPSCR_ROUND_TOZERO);
+ return vfp_single_ftosi(state, sd, unused, m, FPSCR_ROUND_TOZERO);
}
static struct op fops_ext[] = {
@@ -752,200 +793,237 @@ static struct op fops_ext[] = {
static u32
vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
- struct vfp_single *vsm, u32 fpscr)
+ struct vfp_single *vsm, u32 fpscr)
{
- struct vfp_single *vsp;
- u32 exceptions = 0;
- int tn, tm;
-
- tn = vfp_single_type(vsn);
- tm = vfp_single_type(vsm);
-
- if (tn & tm & VFP_INFINITY) {
- /*
- * Two infinities. Are they different signs?
- */
- if (vsn->sign ^ vsm->sign) {
- /*
- * different signs -> invalid
- */
- exceptions = FPSCR_IOC;
- vsp = &vfp_single_default_qnan;
- } else {
- /*
- * same signs -> valid
- */
- vsp = vsn;
- }
- } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) {
- /*
- * One infinity and one number -> infinity
- */
- vsp = vsn;
- } else {
- /*
- * 'n' is a NaN of some type
- */
- return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
- }
- *vsd = *vsp;
- return exceptions;
+ struct vfp_single *vsp;
+ u32 exceptions = 0;
+ int tn, tm;
+
+ tn = vfp_single_type(vsn);
+ tm = vfp_single_type(vsm);
+
+ if (tn & tm & VFP_INFINITY) {
+ /*
+ * Two infinities. Are they different signs?
+ */
+ if (vsn->sign ^ vsm->sign) {
+ /*
+ * different signs -> invalid
+ */
+ exceptions = FPSCR_IOC;
+ vsp = &vfp_single_default_qnan;
+ } else {
+ /*
+ * same signs -> valid
+ */
+ vsp = vsn;
+ }
+ } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) {
+ /*
+ * One infinity and one number -> infinity
+ */
+ vsp = vsn;
+ } else {
+ /*
+ * 'n' is a NaN of some type
+ */
+ return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
+ }
+ *vsd = *vsp;
+ return exceptions;
}
static u32
vfp_single_add(struct vfp_single *vsd, struct vfp_single *vsn,
- struct vfp_single *vsm, u32 fpscr)
+ struct vfp_single *vsm, u32 fpscr)
{
- u32 exp_diff, m_sig;
-
- if (vsn->significand & 0x80000000 ||
- vsm->significand & 0x80000000) {
- pr_info("VFP: bad FP values\n");
- vfp_single_dump("VSN", vsn);
- vfp_single_dump("VSM", vsm);
- }
-
- /*
- * Ensure that 'n' is the largest magnitude number. Note that
- * if 'n' and 'm' have equal exponents, we do not swap them.
- * This ensures that NaN propagation works correctly.
- */
- if (vsn->exponent < vsm->exponent) {
- struct vfp_single *t = vsn;
- vsn = vsm;
- vsm = t;
- }
-
- /*
- * Is 'n' an infinity or a NaN? Note that 'm' may be a number,
- * infinity or a NaN here.
- */
- if (vsn->exponent == 255)
- return vfp_single_fadd_nonnumber(vsd, vsn, vsm, fpscr);
-
- /*
- * We have two proper numbers, where 'vsn' is the larger magnitude.
- *
- * Copy 'n' to 'd' before doing the arithmetic.
- */
- *vsd = *vsn;
-
- /*
- * Align both numbers.
- */
- exp_diff = vsn->exponent - vsm->exponent;
- m_sig = vfp_shiftright32jamming(vsm->significand, exp_diff);
-
- /*
- * If the signs are different, we are really subtracting.
- */
- if (vsn->sign ^ vsm->sign) {
- m_sig = vsn->significand - m_sig;
- if ((s32)m_sig < 0) {
- vsd->sign = vfp_sign_negate(vsd->sign);
- m_sig = -m_sig;
- } else if (m_sig == 0) {
- vsd->sign = (fpscr & FPSCR_RMODE_MASK) ==
- FPSCR_ROUND_MINUSINF ? 0x8000 : 0;
- }
- } else {
- m_sig = vsn->significand + m_sig;
- }
- vsd->significand = m_sig;
-
- return 0;
+ u32 exp_diff, m_sig;
+
+ if (vsn->significand & 0x80000000 ||
+ vsm->significand & 0x80000000) {
+ pr_info("VFP: bad FP values in %s\n", __func__);
+ vfp_single_dump("VSN", vsn);
+ vfp_single_dump("VSM", vsm);
+ }
+
+ /*
+ * Ensure that 'n' is the largest magnitude number. Note that
+ * if 'n' and 'm' have equal exponents, we do not swap them.
+ * This ensures that NaN propagation works correctly.
+ */
+ if (vsn->exponent < vsm->exponent) {
+ struct vfp_single *t = vsn;
+ vsn = vsm;
+ vsm = t;
+ }
+
+ /*
+ * Is 'n' an infinity or a NaN? Note that 'm' may be a number,
+ * infinity or a NaN here.
+ */
+ if (vsn->exponent == 255)
+ return vfp_single_fadd_nonnumber(vsd, vsn, vsm, fpscr);
+
+ /*
+ * We have two proper numbers, where 'vsn' is the larger magnitude.
+ *
+ * Copy 'n' to 'd' before doing the arithmetic.
+ */
+ *vsd = *vsn;
+
+ /*
+ * Align both numbers.
+ */
+ exp_diff = vsn->exponent - vsm->exponent;
+ m_sig = vfp_shiftright32jamming(vsm->significand, exp_diff);
+
+ /*
+ * If the signs are different, we are really subtracting.
+ */
+ if (vsn->sign ^ vsm->sign) {
+ m_sig = vsn->significand - m_sig;
+ if ((s32)m_sig < 0) {
+ vsd->sign = vfp_sign_negate(vsd->sign);
+ m_sig = 0 - m_sig;
+ } else if (m_sig == 0) {
+ vsd->sign = (fpscr & FPSCR_RMODE_MASK) ==
+ FPSCR_ROUND_MINUSINF ? 0x8000 : 0;
+ }
+ } else {
+ m_sig = vsn->significand + m_sig;
+ }
+ vsd->significand = m_sig;
+
+ return 0;
}
static u32
vfp_single_multiply(struct vfp_single *vsd, struct vfp_single *vsn, struct vfp_single *vsm, u32 fpscr)
{
- vfp_single_dump("VSN", vsn);
- vfp_single_dump("VSM", vsm);
-
- /*
- * Ensure that 'n' is the largest magnitude number. Note that
- * if 'n' and 'm' have equal exponents, we do not swap them.
- * This ensures that NaN propagation works correctly.
- */
- if (vsn->exponent < vsm->exponent) {
- struct vfp_single *t = vsn;
- vsn = vsm;
- vsm = t;
- pr_debug("VFP: swapping M <-> N\n");
- }
-
- vsd->sign = vsn->sign ^ vsm->sign;
-
- /*
- * If 'n' is an infinity or NaN, handle it. 'm' may be anything.
- */
- if (vsn->exponent == 255) {
- if (vsn->significand || (vsm->exponent == 255 && vsm->significand))
- return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
- if ((vsm->exponent | vsm->significand) == 0) {
- *vsd = vfp_single_default_qnan;
- return FPSCR_IOC;
- }
- vsd->exponent = vsn->exponent;
- vsd->significand = 0;
- return 0;
- }
-
- /*
- * If 'm' is zero, the result is always zero. In this case,
- * 'n' may be zero or a number, but it doesn't matter which.
- */
- if ((vsm->exponent | vsm->significand) == 0) {
- vsd->exponent = 0;
- vsd->significand = 0;
- return 0;
- }
-
- /*
- * We add 2 to the destination exponent for the same reason as
- * the addition case - though this time we have +1 from each
- * input operand.
- */
- vsd->exponent = vsn->exponent + vsm->exponent - 127 + 2;
- vsd->significand = vfp_hi64to32jamming((u64)vsn->significand * vsm->significand);
-
- vfp_single_dump("VSD", vsd);
- return 0;
+ vfp_single_dump("VSN", vsn);
+ vfp_single_dump("VSM", vsm);
+
+ /*
+ * Ensure that 'n' is the largest magnitude number. Note that
+ * if 'n' and 'm' have equal exponents, we do not swap them.
+ * This ensures that NaN propagation works correctly.
+ */
+ if (vsn->exponent < vsm->exponent) {
+ struct vfp_single *t = vsn;
+ vsn = vsm;
+ vsm = t;
+ pr_debug("VFP: swapping M <-> N\n");
+ }
+
+ vsd->sign = vsn->sign ^ vsm->sign;
+
+ /*
+ * If 'n' is an infinity or NaN, handle it. 'm' may be anything.
+ */
+ if (vsn->exponent == 255) {
+ if (vsn->significand || (vsm->exponent == 255 && vsm->significand))
+ return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
+ if ((vsm->exponent | vsm->significand) == 0) {
+ *vsd = vfp_single_default_qnan;
+ return FPSCR_IOC;
+ }
+ vsd->exponent = vsn->exponent;
+ vsd->significand = 0;
+ return 0;
+ }
+
+ /*
+ * If 'm' is zero, the result is always zero. In this case,
+ * 'n' may be zero or a number, but it doesn't matter which.
+ */
+ if ((vsm->exponent | vsm->significand) == 0) {
+ vsd->exponent = 0;
+ vsd->significand = 0;
+ return 0;
+ }
+
+ /*
+ * We add 2 to the destination exponent for the same reason as
+ * the addition case - though this time we have +1 from each
+ * input operand.
+ */
+ vsd->exponent = vsn->exponent + vsm->exponent - 127 + 2;
+ vsd->significand = vfp_hi64to32jamming((u64)vsn->significand * vsm->significand);
+
+ vfp_single_dump("VSD", vsd);
+ return 0;
}
#define NEG_MULTIPLY (1 << 0)
#define NEG_SUBTRACT (1 << 1)
static u32
-vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
+vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
{
- struct vfp_single vsd, vsp, vsn, vsm;
- u32 exceptions;
- s32 v;
+
+ {
+ struct vfp_single vsd, vsp, vsn, vsm;
+ u32 exceptions;
+ s32 v;
+
+ v = vfp_get_float(state, sn);
+ pr_debug("VFP: s%u = %08x\n", sn, v);
+ vfp_single_unpack(&vsn, v);
+ if (vsn.exponent == 0 && vsn.significand)
+ vfp_single_normalise_denormal(&vsn);
+
+ vfp_single_unpack(&vsm, m);
+ if (vsm.exponent == 0 && vsm.significand)
+ vfp_single_normalise_denormal(&vsm);
+
+ exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
+
+ if (negate & NEG_MULTIPLY)
+ vsp.sign = vfp_sign_negate(vsp.sign);
+
+ v = vfp_get_float(state, sd);
+ pr_debug("VFP: s%u = %08x\n", sd, v);
+ vfp_single_unpack(&vsn, v);
+ if (negate & NEG_SUBTRACT)
+ vsn.sign = vfp_sign_negate(vsn.sign);
+
+ exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
+
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, func);
+ }
+
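+    /*
+     * Everything below this point is unreachable: the block above
+     * always returns. It sketches an alternative double-precision
+     * accumulate path and is kept for reference only.
+     */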
+ struct vfp_double vsd, vsp, vsn, vsm;
+ u32 exceptions;
+ s32 v;
+ s64 vd;
+ s64 md;
+
+ v = vfp_get_float(state, sn);
+ vd = vfp_single_to_doubleintern(state, v, fpscr);
+ vfp_double_unpack(&vsn, vd);
+
+ md = vfp_single_to_doubleintern(state, m, fpscr);
+ vfp_double_unpack(&vsm, md);
+
+ exceptions = vfp_double_multiply(&vsp, &vsn, &vsm, fpscr);
+ if (negate & NEG_MULTIPLY)
+ vsp.sign = vfp_sign_negate(vsp.sign);
- v = vfp_get_float(state, sn);
- pr_debug("VFP: s%u = %08x\n", sn, v);
- vfp_single_unpack(&vsn, v);
- if (vsn.exponent == 0 && vsn.significand)
- vfp_single_normalise_denormal(&vsn);
+ v = vfp_get_float(state, sd);
+ vd = vfp_single_to_doubleintern(state, v, fpscr);
+ vfp_double_unpack(&vsn, vd);
- vfp_single_unpack(&vsm, m);
- if (vsm.exponent == 0 && vsm.significand)
- vfp_single_normalise_denormal(&vsm);
+ if (negate & NEG_SUBTRACT)
+ vsn.sign = vfp_sign_negate(vsn.sign);
- exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
- if (negate & NEG_MULTIPLY)
- vsp.sign = vfp_sign_negate(vsp.sign);
+ exceptions |= vfp_double_add(&vsd, &vsn, &vsp, fpscr);
- v = vfp_get_float(state, sd);
- pr_debug("VFP: s%u = %08x\n", sd, v);
- vfp_single_unpack(&vsn, v);
- if (negate & NEG_SUBTRACT)
- vsn.sign = vfp_sign_negate(vsn.sign);
+ s64 debug = vfp_double_pack(&vsd);
- exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
+ return vfp_double_fcvtsinterncutting(state, sd, &vsd, fpscr);
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, func);
+#endif
}
/*
@@ -957,8 +1035,8 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
*/
static u32 vfp_single_fmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
- return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
+ pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
+ return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
}
/*
@@ -966,8 +1044,8 @@ static u32 vfp_single_fmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fnmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sd, sn);
- return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_MULTIPLY, "fnmac");
+ pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sd, sn);
+ return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_MULTIPLY, "fnmac");
}
/*
@@ -975,8 +1053,8 @@ static u32 vfp_single_fnmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
*/
static u32 vfp_single_fmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
- return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_SUBTRACT, "fmsc");
+ pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
+ return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_SUBTRACT, "fmsc");
}
/*
@@ -984,8 +1062,8 @@ static u32 vfp_single_fmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
- return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
+ pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
+ return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
}
/*
@@ -993,22 +1071,22 @@ static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
*/
static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- struct vfp_single vsd, vsn, vsm;
- u32 exceptions;
- s32 n = vfp_get_float(state, sn);
+ struct vfp_single vsd, vsn, vsm;
+ u32 exceptions;
+ s32 n = vfp_get_float(state, sn);
- pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, n);
+ pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, n);
- vfp_single_unpack(&vsn, n);
- if (vsn.exponent == 0 && vsn.significand)
- vfp_single_normalise_denormal(&vsn);
+ vfp_single_unpack(&vsn, n);
+ if (vsn.exponent == 0 && vsn.significand)
+ vfp_single_normalise_denormal(&vsn);
- vfp_single_unpack(&vsm, m);
- if (vsm.exponent == 0 && vsm.significand)
- vfp_single_normalise_denormal(&vsm);
+ vfp_single_unpack(&vsm, m);
+ if (vsm.exponent == 0 && vsm.significand)
+ vfp_single_normalise_denormal(&vsm);
- exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
+ exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
}
/*
@@ -1016,23 +1094,23 @@ static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- struct vfp_single vsd, vsn, vsm;
- u32 exceptions;
- s32 n = vfp_get_float(state, sn);
+ struct vfp_single vsd, vsn, vsm;
+ u32 exceptions;
+ s32 n = vfp_get_float(state, sn);
- pr_debug("VFP: s%u = %08x\n", sn, n);
+ pr_debug("VFP: s%u = %08x\n", sn, n);
- vfp_single_unpack(&vsn, n);
- if (vsn.exponent == 0 && vsn.significand)
- vfp_single_normalise_denormal(&vsn);
+ vfp_single_unpack(&vsn, n);
+ if (vsn.exponent == 0 && vsn.significand)
+ vfp_single_normalise_denormal(&vsn);
- vfp_single_unpack(&vsm, m);
- if (vsm.exponent == 0 && vsm.significand)
- vfp_single_normalise_denormal(&vsm);
+ vfp_single_unpack(&vsm, m);
+ if (vsm.exponent == 0 && vsm.significand)
+ vfp_single_normalise_denormal(&vsm);
- exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
- vsd.sign = vfp_sign_negate(vsd.sign);
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
+ exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+ vsd.sign = vfp_sign_negate(vsd.sign);
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
}
/*
@@ -1040,26 +1118,26 @@ static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
*/
static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- struct vfp_single vsd, vsn, vsm;
- u32 exceptions;
- s32 n = vfp_get_float(state, sn);
+ struct vfp_single vsd, vsn, vsm;
+ u32 exceptions;
+ s32 n = vfp_get_float(state, sn);
- pr_debug("VFP: s%u = %08x\n", sn, n);
+ pr_debug("VFP: s%u = %08x\n", sn, n);
- /*
- * Unpack and normalise denormals.
- */
- vfp_single_unpack(&vsn, n);
- if (vsn.exponent == 0 && vsn.significand)
- vfp_single_normalise_denormal(&vsn);
+ /*
+ * Unpack and normalise denormals.
+ */
+ vfp_single_unpack(&vsn, n);
+ if (vsn.exponent == 0 && vsn.significand)
+ vfp_single_normalise_denormal(&vsn);
- vfp_single_unpack(&vsm, m);
- if (vsm.exponent == 0 && vsm.significand)
- vfp_single_normalise_denormal(&vsm);
+ vfp_single_unpack(&vsm, m);
+ if (vsm.exponent == 0 && vsm.significand)
+ vfp_single_normalise_denormal(&vsm);
- exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
+ exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fadd");
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fadd");
}
/*
@@ -1067,11 +1145,11 @@ static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fsub(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
- /*
- * Subtraction is addition with one sign inverted.
- */
- return vfp_single_fadd(state, sd, sn, vfp_single_packed_negate(m), fpscr);
+ pr_debug("In %sVFP: s%u = %08x\n", __FUNCTION__, sn, sd);
+ /*
+ * Subtraction is addition with one sign inverted.
+ */
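+ /* e.g. 5.0 - 3.0 is evaluated as 5.0 + (-3.0): only bit 31 of m flips. */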
+ return vfp_single_fadd(state, sd, sn, vfp_single_packed_negate(m), fpscr);
}
/*
@@ -1079,107 +1157,107 @@ static u32 vfp_single_fsub(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
*/
static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
{
- struct vfp_single vsd, vsn, vsm;
- u32 exceptions = 0;
- s32 n = vfp_get_float(state, sn);
- int tm, tn;
-
- pr_debug("VFP: s%u = %08x\n", sn, n);
-
- vfp_single_unpack(&vsn, n);
- vfp_single_unpack(&vsm, m);
-
- vsd.sign = vsn.sign ^ vsm.sign;
-
- tn = vfp_single_type(&vsn);
- tm = vfp_single_type(&vsm);
-
- /*
- * Is n a NAN?
- */
- if (tn & VFP_NAN)
- goto vsn_nan;
-
- /*
- * Is m a NAN?
- */
- if (tm & VFP_NAN)
- goto vsm_nan;
-
- /*
- * If n and m are infinity, the result is invalid
- * If n and m are zero, the result is invalid
- */
- if (tm & tn & (VFP_INFINITY|VFP_ZERO))
- goto invalid;
-
- /*
- * If n is infinity, the result is infinity
- */
- if (tn & VFP_INFINITY)
- goto infinity;
-
- /*
- * If m is zero, raise div0 exception
- */
- if (tm & VFP_ZERO)
- goto divzero;
-
- /*
- * If m is infinity, or n is zero, the result is zero
- */
- if (tm & VFP_INFINITY || tn & VFP_ZERO)
- goto zero;
-
- if (tn & VFP_DENORMAL)
- vfp_single_normalise_denormal(&vsn);
- if (tm & VFP_DENORMAL)
- vfp_single_normalise_denormal(&vsm);
-
- /*
- * Ok, we have two numbers, we can perform division.
- */
- vsd.exponent = vsn.exponent - vsm.exponent + 127 - 1;
- vsm.significand <<= 1;
- if (vsm.significand <= (2 * vsn.significand)) {
- vsn.significand >>= 1;
- vsd.exponent++;
- }
- {
- u64 significand = (u64)vsn.significand << 32;
- do_div(significand, vsm.significand);
- vsd.significand = significand;
- }
- if ((vsd.significand & 0x3f) == 0)
- vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
-
- return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fdiv");
-
- vsn_nan:
- exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
- pack:
- vfp_put_float(state, vfp_single_pack(&vsd), sd);
- return exceptions;
-
- vsm_nan:
- exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
- goto pack;
-
- zero:
- vsd.exponent = 0;
- vsd.significand = 0;
- goto pack;
-
- divzero:
- exceptions = FPSCR_DZC;
- infinity:
- vsd.exponent = 255;
- vsd.significand = 0;
- goto pack;
-
- invalid:
- vfp_put_float(state, vfp_single_pack(&vfp_single_default_qnan), sd);
- return FPSCR_IOC;
+ struct vfp_single vsd, vsn, vsm;
+ u32 exceptions = 0;
+ s32 n = vfp_get_float(state, sn);
+ int tm, tn;
+
+ pr_debug("VFP: s%u = %08x\n", sn, n);
+
+ vfp_single_unpack(&vsn, n);
+ vfp_single_unpack(&vsm, m);
+
+ vsd.sign = vsn.sign ^ vsm.sign;
+
+ tn = vfp_single_type(&vsn);
+ tm = vfp_single_type(&vsm);
+
+ /*
+ * Is n a NAN?
+ */
+ if (tn & VFP_NAN)
+ goto vsn_nan;
+
+ /*
+ * Is m a NAN?
+ */
+ if (tm & VFP_NAN)
+ goto vsm_nan;
+
+ /*
+ * If n and m are infinity, the result is invalid
+ * If n and m are zero, the result is invalid
+ */
+ if (tm & tn & (VFP_INFINITY|VFP_ZERO))
+ goto invalid;
+
+ /*
+ * If n is infinity, the result is infinity
+ */
+ if (tn & VFP_INFINITY)
+ goto infinity;
+
+ /*
+ * If m is zero, raise div0 exception
+ */
+ if (tm & VFP_ZERO)
+ goto divzero;
+
+ /*
+ * If m is infinity, or n is zero, the result is zero
+ */
+ if (tm & VFP_INFINITY || tn & VFP_ZERO)
+ goto zero;
+
+ if (tn & VFP_DENORMAL)
+ vfp_single_normalise_denormal(&vsn);
+ if (tm & VFP_DENORMAL)
+ vfp_single_normalise_denormal(&vsm);
+
+ /*
+ * Ok, we have two numbers, we can perform division.
+ */
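+ /*
+ * Roughly: a quotient of two significands in [1, 2) lies in
+ * (0.5, 2), so start one below the biased result exponent and,
+ * after the pre-shift below, bump it back up when the dividend's
+ * significand is at least the divisor's.
+ */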
+ vsd.exponent = vsn.exponent - vsm.exponent + 127 - 1;
+ vsm.significand <<= 1;
+ if (vsm.significand <= (2 * vsn.significand)) {
+ vsn.significand >>= 1;
+ vsd.exponent++;
+ }
+ {
+ u64 significand = (u64)vsn.significand << 32;
+ do_div(significand, vsm.significand);
+ vsd.significand = (u32)significand;
+ }
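+ /*
+ * Sticky-bit check (illustrative reading): if the quotient's low
+ * rounding bits are all zero, multiply back and compare against
+ * the shifted dividend; any mismatch means the division was
+ * inexact, so the lowest bit is forced on to round correctly.
+ */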
+ if ((vsd.significand & 0x3f) == 0)
+ vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
+
+ return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fdiv");
+
+vsn_nan:
+ exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
+pack:
+ vfp_put_float(state, vfp_single_pack(&vsd), sd);
+ return exceptions;
+
+vsm_nan:
+ exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
+ goto pack;
+
+zero:
+ vsd.exponent = 0;
+ vsd.significand = 0;
+ goto pack;
+
+divzero:
+ exceptions = FPSCR_DZC;
+infinity:
+ vsd.exponent = 255;
+ vsd.significand = 0;
+ goto pack;
+
+invalid:
+ vfp_put_float(state, vfp_single_pack(&vfp_single_default_qnan), sd);
+ return FPSCR_IOC;
}
static struct op fops[] = {
@@ -1199,80 +1277,80 @@ static struct op fops[] = {
u32 vfp_single_cpdo(ARMul_State* state, u32 inst, u32 fpscr)
{
- u32 op = inst & FOP_MASK;
- u32 exceptions = 0;
- unsigned int dest;
- unsigned int sn = vfp_get_sn(inst);
- unsigned int sm = vfp_get_sm(inst);
- unsigned int vecitr, veclen, vecstride;
- struct op *fop;
- pr_debug("In %s\n", __FUNCTION__);
-
- vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
-
- fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
-
- /*
- * fcvtsd takes a dN register number as destination, not sN.
- * Technically, if bit 0 of dd is set, this is an invalid
- * instruction. However, we ignore this for efficiency.
- * It also only operates on scalars.
- */
- if (fop->flags & OP_DD)
- dest = vfp_get_dd(inst);
- else
- dest = vfp_get_sd(inst);
-
- /*
- * If destination bank is zero, vector length is always '1'.
- * ARM DDI0100F C5.1.3, C5.3.2.
- */
- if ((fop->flags & OP_SCALAR) || FREG_BANK(dest) == 0)
- veclen = 0;
- else
- veclen = fpscr & FPSCR_LENGTH_MASK;
-
- pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
- (veclen >> FPSCR_LENGTH_BIT) + 1);
-
- if (!fop->fn) {
- printf("VFP: could not find single op %d, inst=0x%x@0x%x\n", FEXT_TO_IDX(inst), inst, state->Reg[15]);
- exit(-1);
- goto invalid;
- }
-
- for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
- s32 m = vfp_get_float(state, sm);
- u32 except;
- char type;
-
- type = fop->flags & OP_DD ? 'd' : 's';
- if (op == FOP_EXT)
- pr_debug("VFP: itr%d (%c%u) = op[%u] (s%u=%08x)\n",
- vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
- sm, m);
- else
- pr_debug("VFP: itr%d (%c%u) = (s%u) op[%u] (s%u=%08x)\n",
- vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
- FOP_TO_IDX(op), sm, m);
-
- except = fop->fn(state, dest, sn, m, fpscr);
- pr_debug("VFP: itr%d: exceptions=%08x\n",
- vecitr >> FPSCR_LENGTH_BIT, except);
-
- exceptions |= except;
-
- /*
- * CHECK: It appears to be undefined whether we stop when
- * we encounter an exception. We continue.
- */
- dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
- sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7);
- if (FREG_BANK(sm) != 0)
- sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7);
- }
- return exceptions;
-
- invalid:
- return (u32)-1;
+ u32 op = inst & FOP_MASK;
+ u32 exceptions = 0;
+ unsigned int dest;
+ unsigned int sn = vfp_get_sn(inst);
+ unsigned int sm = vfp_get_sm(inst);
+ unsigned int vecitr, veclen, vecstride;
+ struct op *fop;
+ pr_debug("In %s\n", __FUNCTION__);
+
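+ /*
+ * FPSCR.STRIDE is architected as 0b00 (stride 1) or 0b11
+ * (stride 2), so comparing against the full mask suffices.
+ */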
+ vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
+
+ fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
+
+ /*
+ * fcvtsd takes a dN register number as destination, not sN.
+ * Technically, if bit 0 of dd is set, this is an invalid
+ * instruction. However, we ignore this for efficiency.
+ * It also only operates on scalars.
+ */
+ if (fop->flags & OP_DD)
+ dest = vfp_get_dd(inst);
+ else
+ dest = vfp_get_sd(inst);
+
+ /*
+ * If destination bank is zero, vector length is always '1'.
+ * ARM DDI0100F C5.1.3, C5.3.2.
+ */
+ if ((fop->flags & OP_SCALAR) || FREG_BANK(dest) == 0)
+ veclen = 0;
+ else
+ veclen = fpscr & FPSCR_LENGTH_MASK;
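+ /*
+ * Example: a destination in bank 0 (s0-s7) forces a scalar
+ * operation even when FPSCR.LEN requests a vector.
+ */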
+
+ pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
+ (veclen >> FPSCR_LENGTH_BIT) + 1);
+
+ if (!fop->fn) {
+ printf("VFP: could not find single op %d, inst=0x%x@0x%x\n", FEXT_TO_IDX(inst), inst, state->Reg[15]);
+ exit(-1);
+ goto invalid; /* unreachable after exit(); kept so the 'invalid' label stays referenced */
+ }
+
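+ /*
+ * Illustrative walk: register indices advance by 'vecstride' but
+ * wrap within their 8-register bank, e.g. starting at s11 with
+ * stride 2 the sequence is s11, s13, s15, s9.
+ */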
+ for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
+ s32 m = vfp_get_float(state, sm);
+ u32 except;
+ char type;
+
+ type = fop->flags & OP_DD ? 'd' : 's';
+ if (op == FOP_EXT)
+ pr_debug("VFP: itr%d (%c%u) = op[%u] (s%u=%08x)\n",
+ vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
+ sm, m);
+ else
+ pr_debug("VFP: itr%d (%c%u) = (s%u) op[%u] (s%u=%08x)\n",
+ vecitr >> FPSCR_LENGTH_BIT, type, dest, sn,
+ FOP_TO_IDX(op), sm, m);
+
+ except = fop->fn(state, dest, sn, m, fpscr);
+ pr_debug("VFP: itr%d: exceptions=%08x\n",
+ vecitr >> FPSCR_LENGTH_BIT, except);
+
+ exceptions |= except;
+
+ /*
+ * CHECK: It appears to be undefined whether we stop when
+ * we encounter an exception. We continue.
+ */
+ dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7);
+ sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7);
+ if (FREG_BANK(sm) != 0)
+ sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7);
+ }
+ return exceptions;
+
+invalid:
+ return (u32)-1;
}