summaryrefslogtreecommitdiff
path: root/runtime/arm
diff options
context:
space:
mode:
author: xleroy <xleroy@fca1b0fc-160b-0410-b1d3-a4f43f01ea2e>	2013-05-06 08:25:39 +0000
committer: xleroy <xleroy@fca1b0fc-160b-0410-b1d3-a4f43f01ea2e>	2013-05-06 08:25:39 +0000
commit: 52808842306243ed922eb8e7cf0525f1d890198c (patch)
tree: a56f1f23a13c189aac1e964cfb7df6666168c8af /runtime/arm
parent: 1611de2462f56666fed59452e0a41e2251765d75 (diff)
Syntax errors
git-svn-id: https://yquem.inria.fr/compcert/svn/compcert/trunk@2237 fca1b0fc-160b-0410-b1d3-a4f43f01ea2e
Diffstat (limited to 'runtime/arm')
-rw-r--r--	runtime/arm/i64_stof.S	30
-rw-r--r--	runtime/arm/i64_utof.S	32
2 files changed, 31 insertions, 31 deletions
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index 165063a..22f2687 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -40,34 +40,34 @@
.global __i64_stof
__i64_stof:
- # Check whether -2^53 <= X < 2^53
+ @ Check whether -2^53 <= X < 2^53
mov r2, r1, asr #21
mov r3, r1, asr #31 @ (r2,r3) = X >> 53
adds r2, r2, #1
adc r3, r3, #0 @ (r2,r3) = X >> 53 + 1
cmp r3, #2
- blo 1b
- # X is large enough that double rounding can occur.
- # Avoid it by nudging X away from the points where double rounding
- # occurs (the "round to odd" technique)
+ blo 1f
+ @ X is large enough that double rounding can occur.
+ @ Avoid it by nudging X away from the points where double rounding
+ @ occurs (the "round to odd" technique)
mov r2, #0x700
- orr r2, r2, #0xFF # r2 = 0x7FF
- and r3, r0, r2 # extract bits 0 to 11 of X
- add r3, r3, r2 # r3 = (X & 0x7FF) + 0x7FF
- # bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
- # bits 13-31 of r3 are 0
- orr r0, r0, r3 # correct bit number 12 of X
- bic r0, r0, r2 # set to 0 bits 0 to 11 of X
- # Convert to double
+ orr r2, r2, #0xFF @ r2 = 0x7FF
+ and r3, r0, r2 @ extract bits 0 to 11 of X
+ add r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
+ @ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
+ @ bits 13-31 of r3 are 0
+ orr r0, r0, r3 @ correct bit number 12 of X
+ bic r0, r0, r2 @ set to 0 bits 0 to 11 of X
+ @ Convert to double
1: fmsr s0, r0
fuitod d0, s0 @ convert low half to double (unsigned)
fmsr s2, r1
fsitod d1, s2 @ convert high half to double (signed)
fldd d2, .LC1 @ d2 = 2^32
fmacd d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
- # Round to single
+ @ Round to single
fcvtsd s0, d0
- # Return result in r0
+ @ Return result in r0
fmrs r0, s0
bx lr
.type __i64_stof, %function
diff --git a/runtime/arm/i64_utof.S b/runtime/arm/i64_utof.S
index ff21438..6fb11df 100644
--- a/runtime/arm/i64_utof.S
+++ b/runtime/arm/i64_utof.S
@@ -40,30 +40,30 @@
.global __i64_utof
__i64_utof:
- # Check whether X < 2^53
- movs r2, r1, lsr #21 # test if X >> 53 == 0
- beq 1b
- # X is large enough that double rounding can occur.
- # Avoid it by nudging X away from the points where double rounding
- # occurs (the "round to odd" technique)
+ @ Check whether X < 2^53
+ movs r2, r1, lsr #21 @ test if X >> 53 == 0
+ beq 1f
+ @ X is large enough that double rounding can occur.
+ @ Avoid it by nudging X away from the points where double rounding
+ @ occurs (the "round to odd" technique)
mov r2, #0x700
- orr r2, r2, #0xFF # r2 = 0x7FF
- and r3, r0, r2 # extract bits 0 to 11 of X
- add r3, r3, r2 # r3 = (X & 0x7FF) + 0x7FF
- # bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
- # bits 13-31 of r3 are 0
- orr r0, r0, r3 # correct bit number 12 of X
- bic r0, r0, r2 # set to 0 bits 0 to 11 of X
- # Convert to double
+ orr r2, r2, #0xFF @ r2 = 0x7FF
+ and r3, r0, r2 @ extract bits 0 to 11 of X
+ add r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
+ @ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
+ @ bits 13-31 of r3 are 0
+ orr r0, r0, r3 @ correct bit number 12 of X
+ bic r0, r0, r2 @ set to 0 bits 0 to 11 of X
+ @ Convert to double
1: fmsr s0, r0
fuitod d0, s0 @ convert low half to double (unsigned)
fmsr s2, r1
fuitod d1, s2 @ convert high half to double (unsigned)
fldd d2, .LC1 @ d2 = 2^32
fmacd d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
- # Round to single
+ @ Round to single
fcvtsd s0, d0
- # Return result in r0
+ @ Return result in r0
fmrs r0, s0
bx lr
.type __i64_utof, %function