From 2dbe106c09b60690b87e31e58d505b1f4e05b57f Mon Sep 17 00:00:00 2001
From: aspiwack
Date: Fri, 11 May 2007 17:00:58 +0000
Subject: Processor integers + Print assumption (see the coqdev mailing list
 for details).

git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/coq/trunk@9821 85f007b7-540e-0410-9357-904b9bb8a0f7
---
 kernel/byterun/coq_fix_code.c |  16 ++-
 kernel/byterun/coq_fix_code.h |   1 +
 kernel/byterun/coq_instruct.h |  12 +-
 kernel/byterun/coq_interp.c   | 318 ++++++++++++++++++++++++++++++++++++++++--
 kernel/byterun/int64_emul.h   | 272 ++++++++++++++++++++++++++++++++++++
 kernel/byterun/int64_native.h |  50 +++++++
 6 files changed, 651 insertions(+), 18 deletions(-)
 create mode 100644 kernel/byterun/int64_emul.h
 create mode 100644 kernel/byterun/int64_native.h

diff --git a/kernel/byterun/coq_fix_code.c b/kernel/byterun/coq_fix_code.c
index affcccb3a..961c49785 100644
--- a/kernel/byterun/coq_fix_code.c
+++ b/kernel/byterun/coq_fix_code.c
@@ -8,6 +8,9 @@
 /*                                                                     */
 /***********************************************************************/
 
+/* Arnaud Spiwack: expanded the virtual machine with operators used
+   for fast computation of bounded (31-bit) integers */
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -37,7 +40,12 @@ void init_arity () {
   arity[GETFIELD0]=arity[GETFIELD1]=arity[SETFIELD0]=arity[SETFIELD1]=
   arity[CONST0]=arity[CONST1]=arity[CONST2]=arity[CONST3]=
   arity[PUSHCONST0]=arity[PUSHCONST1]=arity[PUSHCONST2]=arity[PUSHCONST3]=
-  arity[ACCUMULATE]=arity[STOP]=arity[MAKEPROD]= 0;
+  arity[ACCUMULATE]=arity[STOP]=arity[MAKEPROD]=
+  arity[ADDINT31]=arity[ADDCINT31]=arity[ADDCARRYCINT31]=
+  arity[SUBINT31]=arity[SUBCINT31]=arity[SUBCARRYCINT31]=
+  arity[MULCINT31]=arity[MULINT31]=arity[COMPAREINT31]=
+  arity[DIV21INT31]=arity[DIVINT31]=arity[ADDMULDIVINT31]=
+  arity[COMPINT31]=arity[DECOMPINT31]=0;
   /* instruction with one operand */
   arity[ACC]=arity[PUSHACC]=arity[POP]=arity[ENVACC]=arity[PUSHENVACC]=
   arity[PUSH_RETADDR]=arity[APPLY]=arity[APPTERM1]=arity[APPTERM2]=
@@ -45,9 +53,11 @@ void init_arity () {
   arity[PUSHOFFSETCLOSURE]=arity[GETGLOBAL]=arity[PUSHGETGLOBAL]=
   arity[MAKEBLOCK1]=arity[MAKEBLOCK2]=arity[MAKEBLOCK3]=arity[MAKEBLOCK4]=
   arity[MAKEACCU]=arity[CONSTINT]=arity[PUSHCONSTINT]=arity[GRABREC]=
-  arity[PUSHFIELDS]=arity[GETFIELD]=arity[SETFIELD]=arity[ACCUMULATECOND]= 1;
+  arity[PUSHFIELDS]=arity[GETFIELD]=arity[SETFIELD]=arity[ACCUMULATECOND]=
+  arity[BRANCH]=arity[ISCONST]= 1;
   /* instruction with two operands */
-  arity[APPTERM]=arity[MAKEBLOCK]=arity[CLOSURE]=2;
+  arity[APPTERM]=arity[MAKEBLOCK]=arity[CLOSURE]=
+  arity[ARECONST]=2;
   /* instruction with four operands */
   arity[MAKESWITCHBLOCK]=4;
   /* instruction with arbitrary operands */
diff --git a/kernel/byterun/coq_fix_code.h b/kernel/byterun/coq_fix_code.h
index d1dac80fb..003453183 100644
--- a/kernel/byterun/coq_fix_code.h
+++ b/kernel/byterun/coq_fix_code.h
@@ -31,4 +31,5 @@ value coq_makeaccu (value i);
 value coq_pushpop (value i);
 value coq_accucond (value i);
 value coq_is_accumulate_code(value code);
+
 #endif /* _COQ_FIX_CODE_ */
diff --git a/kernel/byterun/coq_instruct.h b/kernel/byterun/coq_instruct.h
index 89616c5f3..00156ebe8 100644
--- a/kernel/byterun/coq_instruct.h
+++ b/kernel/byterun/coq_instruct.h
@@ -35,7 +35,17 @@ enum instructions {
   CONST0, CONST1, CONST2, CONST3, CONSTINT,
   PUSHCONST0, PUSHCONST1, PUSHCONST2, PUSHCONST3, PUSHCONSTINT,
   ACCUMULATE, ACCUMULATECOND,
-  MAKESWITCHBLOCK, MAKEACCU, MAKEPROD, STOP
+  MAKESWITCHBLOCK, MAKEACCU, MAKEPROD,
+/* spiwack: */
+  BRANCH,
+  ADDINT31, ADDCINT31, ADDCARRYCINT31,
+  SUBINT31, SUBCINT31, SUBCARRYCINT31,
+  MULCINT31, MULINT31, DIV21INT31, DIVINT31,
+  ADDMULDIVINT31, COMPAREINT31,
+  ISCONST, ARECONST,
+  COMPINT31, DECOMPINT31,
+/* /spiwack */
+  STOP
 };
 
 #endif /* _COQ_INSTRUCT_ */
diff --git a/kernel/byterun/coq_interp.c b/kernel/byterun/coq_interp.c
index 8f9c10e68..c3072dc72 100644
--- a/kernel/byterun/coq_interp.c
+++ b/kernel/byterun/coq_interp.c
@@ -10,12 +10,31 @@
 
 /* The bytecode interpreter */
 
+/* Spiwack: expanded the virtual machine with operators used
+   for fast computation of bounded (31-bit) integers */
+
 #include <stdio.h>
 #include "coq_gc.h"
 #include "coq_instruct.h"
 #include "coq_fix_code.h"
-#include "coq_memory.h"
-#include "coq_values.h"
+#include "coq_memory.h"
+#include "coq_values.h"
+
+/* spiwack: imports support functions for 64-bit integers */
+#include "config.h"
+#ifdef ARCH_INT64_TYPE
+#include "int64_native.h"
+#else
+#include "int64_emul.h"
+#endif
+
+/* spiwack: a few macros for value/number manipulation */
+#define uint32_of_value(val) (((uint32)val >> 1))
+#define value_of_uint32(i)   ((value)(((uint32)i << 1) | 1))
+#define UI64_of_uint32(i)    ((uint64)I64_literal(0,i))
+#define UI64_of_value(val)   (UI64_of_uint32(uint32_of_value(val)))
+/* /spiwack */
+
 
 /* Registers for the abstract machine:
@@ -61,13 +80,6 @@ sp is a local copy of the global variable extern_sp. */
 #  define print_int(i)
 #endif
 
-/* Wrapper for caml_modify */
-#ifdef OCAML_307
-#define CAML_MODIFY(a,b) modify(a,b)
-#else
-#define CAML_MODIFY(a,b) caml_modify(a,b)
-#endif
-
 /* GC interface */
 #define Setup_for_gc { sp -= 2; sp[0] = accu; sp[1] = coq_env; coq_sp = sp; }
 #define Restore_after_gc { accu = sp[0]; coq_env = sp[1]; sp += 2; }
@@ -640,7 +652,7 @@ value coq_interprete
 	  Field(accu, 0) = sp[0];
 	  *sp = accu;
 	  /* update the accumulate block */
-	  CAML_MODIFY(&Field(p[i], 1),*sp);
+	  caml_modify(&Field(p[i], 1),*sp);
 	  sp++;
 	}
 	pc += nfunc;
@@ -811,7 +823,7 @@ value coq_interprete
 
       Instruct(SETFIELD0){
 	print_instr("SETFIELD0");
-	CAML_MODIFY(&Field(accu, 0),*sp);
+	caml_modify(&Field(accu, 0),*sp);
 	sp++;
 	Next;
       }
@@ -819,7 +831,7 @@ value coq_interprete
       Instruct(SETFIELD1){
 	int i, j, size, size_aux;
 	print_instr("SETFIELD1");
-	CAML_MODIFY(&Field(accu, 1),*sp);
+	caml_modify(&Field(accu, 1),*sp);
 	sp++;
 	Next;
       }
@@ -848,7 +860,7 @@ value coq_interprete
 
       Instruct(SETFIELD){
 	print_instr("SETFIELD");
-	CAML_MODIFY(&Field(accu, *pc),*sp);
+	caml_modify(&Field(accu, *pc),*sp);
 	sp++; pc++;
 	Next;
       }
@@ -1035,7 +1047,285 @@ value coq_interprete
 	sp += 2;
 	Next;
       }
-
+
+      /* spiwack: code for interpreting compiled integers */
+
+      Instruct(BRANCH) {
+	/* unconditional branching */
+	print_instr("BRANCH");
+	pc += *pc;
+	/* pc = (code_t)(pc+*pc); */
+	Next;
+      }
+
+      Instruct(ADDINT31) {
+	/* Adds the integer in the accumulator to the one
+	   on top of the stack (which is popped) */
+	print_instr("ADDINT31");
+	accu = (value)((uint32) accu + (uint32) *sp++ - 1);
+	/* note: unlike OCaml, we do not want the behavior to depend
+	   on the architecture; hence the operands are cast to uint32 */
+	Next;
+      }
+
+      Instruct (ADDCINT31) {
+	print_instr("ADDCINT31");
+	/* returns the sum with a carry */
+	uint32 s;
+	s = (uint32)accu + (uint32)*sp++ - 1;
+	if( (uint32)s < (uint32)accu ) {
+	  /* carry */
+	  Alloc_small(accu, 1, 2); /* ( _ , size, tag ) */
+	}
+	else {
+	  /* no carry */
+	  Alloc_small(accu, 1, 1);
+	}
+	Field(accu, 0) = (value)s;
+	Next;
+      }
+
+      Instruct (ADDCARRYCINT31) {
+	print_instr("ADDCARRYCINT31");
+	/* returns the sum plus one, with a carry */
+	uint32 s;
+	s = (uint32)accu + (uint32)*sp++ + 1;
+	if( (uint32)s <= (uint32)accu ) {
+	  /* carry */
+	  Alloc_small(accu, 1, 2); /* ( _ , size, tag ) */
+	}
+	else {
+	  /* no carry */
+	  Alloc_small(accu, 1, 1);
+	}
+	Field(accu, 0) = (value)s;
+	Next;
+      }
+
+      Instruct (SUBINT31) {
+	print_instr("SUBINT31");
+	/* returns the subtraction */
+	accu = (value)((uint32) accu - (uint32) *sp++ + 1);
+	Next;
+      }
+
+      Instruct (SUBCINT31) {
+	print_instr("SUBCINT31");
+	/* returns the subtraction with a carry */
+	uint32 b;
+	uint32 s;
+	b = (uint32)*sp++;
+	s = (uint32)accu - b + 1;
+	if( (uint32)accu < b ) {
+	  /* carry */
+	  Alloc_small(accu, 1, 2); /* ( _ , size, tag ) */
+	}
+	else {
+	  /* no carry */
+	  Alloc_small(accu, 1, 1);
+	}
+	Field(accu, 0) = (value)s;
+	Next;
+      }
+
+      Instruct (SUBCARRYCINT31) {
+	print_instr("SUBCARRYCINT31");
+	/* returns the subtraction minus one, with a carry */
+	uint32 b;
+	uint32 s;
+	b = (uint32)*sp++;
+	s = (uint32)accu - b - 1;
+	if( (uint32)accu <= b ) {
+	  /* carry */
+	  Alloc_small(accu, 1, 2); /* ( _ , size, tag ) */
+	}
+	else {
+	  /* no carry */
+	  Alloc_small(accu, 1, 1);
+	}
+	Field(accu, 0) = (value)s;
+	Next;
+      }
+
+      Instruct (MULINT31) {
+	/* returns the multiplication */
+	print_instr("MULINT31");
+	accu = value_of_uint32((uint32_of_value(accu)) * (uint32_of_value(*sp++)));
+	Next;
+      }
+
+      Instruct (MULCINT31) {
+	/* returns the multiplication on a double-size word
+	   (special case for 0) */
+	print_instr("MULCINT31");
+	uint64 p;
+	/* accu = 2v+1, *sp = 2w+1  ==>  p = 2*v*w */
+	p = I64_mul (UI64_of_value (accu), UI64_of_uint32 ((*sp++)^1));
+	if ( I64_is_zero(p) ) {
+	  accu = (value)1;
+	}
+	else {
+	  /* the output type is supposed to have a constant constructor
+	     and a non-constant constructor (in that order); the tag
+	     of the non-constant constructor is then 1 */
+	  Alloc_small(accu, 2, 1); /* ( _ , size, tag ) */
+	  /* unsigned shift */
+	  Field(accu, 0) = (value)(I64_to_int32(I64_lsr(p,31))|1); /* higher part */
+	  Field(accu, 1) = (value)(I64_to_int32(p)|1);             /* lower part */
+	}
+	Next;
+      }
+
+      Instruct (DIV21INT31) {
+	print_instr("DIV21INT31");
+	/* spiwack: takes three int31s (the first two of which encode
+	   an int62) and performs the Euclidean division of that int62
+	   by the third int31 */
+	uint64 bigint;
+	bigint = UI64_of_value(accu);
+	bigint = I64_or(I64_lsl(bigint, 31), UI64_of_value(*sp++));
+	uint64 divisor;
+	divisor = UI64_of_value(*sp++);
+	Alloc_small(accu, 2, 1); /* ( _ , size, tag ) */
+	if (I64_is_zero (divisor)) {
+	  Field(accu, 0) = 1; /* 2*0+1 */
+	  Field(accu, 1) = 1; /* 2*0+1 */
+	}
+	else {
+	  uint64 quo, mod;
+	  I64_udivmod(bigint, divisor, &quo, &mod);
+	  Field(accu, 0) = value_of_uint32(I64_to_int32(quo));
+	  Field(accu, 1) = value_of_uint32(I64_to_int32(mod));
+	}
+	Next;
+      }
+
+      Instruct (DIVINT31) {
+	print_instr("DIVINT31");
+	/* spiwack: a priori there is no need for the NON_STANDARD_DIV_MOD
+	   flag, since it probably only concerns negative numbers;
+	   this remains to be checked at this point */
+	uint32 divisor;
+	divisor = uint32_of_value(*sp++);
+	if (divisor == 0) {
+	  Alloc_small(accu, 2, 1); /* ( _ , size, tag ) */
+	  Field(accu, 0) = 1; /* 2*0+1 */
+	  Field(accu, 1) = 1; /* 2*0+1 */
+	}
+	else {
+	  uint32 modulus;
+	  modulus = uint32_of_value(accu);
+	  Alloc_small(accu, 2, 1); /* ( _ , size, tag ) */
+	  Field(accu, 0) = value_of_uint32(modulus/divisor);
+	  Field(accu, 1) = value_of_uint32(modulus%divisor);
+	}
+	Next;
+      }
+
+      Instruct (ADDMULDIVINT31) {
+	print_instr("ADDMULDIVINT31");
+	/* higher-level shift (does shifts and cycles and such) */
+	uint32 shiftby;
+	shiftby = uint32_of_value(accu);
+	/* *sp = 2*x+1  -->  accu = 2^(shiftby+1)*x */
+	accu = (value)(((*sp++)^1) << shiftby);
+	/* accu = 2^(shiftby+1)*x  -->  accu = 2^(shiftby+1)*x + 2*y/2^(31-shiftby) + 1 */
+	accu = (value)((accu | ((*sp++) >> (31-shiftby)))|1);
+	Next;
+      }
+
+      Instruct (COMPAREINT31) {
+	/* returns Eq if equal, Lt if accu is less than *sp, Gt otherwise */
+	/* assumes Inductive _ : _ := Eq | Lt | Gt */
+	print_instr("COMPAREINT31");
+	if ((uint32)accu == (uint32)*sp) {
+	  accu = 1; /* 2*0+1 */
+	  sp++;
+	}
+	else if ((uint32)accu < (uint32)(*sp++)) {
+	  accu = 3; /* 2*1+1 */
+	}
+	else {
+	  accu = 5; /* 2*2+1 */
+	}
+	Next;
+      }
+
+      Instruct (ISCONST) {
+	/* branches if the accu does not contain a constant
+	   (i.e., it contains a non-block value) */
+	print_instr("ISCONST");
+	if ((accu & 1) == 0) /* last bit is 0 -> it is a block */
+	  pc += *pc;
+	else
+	  pc++;
+	Next;
+      }
+
+      Instruct (ARECONST) {
+	/* branches if the first n values on the stack are not
+	   all constants */
+	print_instr("ARECONST");
+	int i, n, ok;
+	ok = 1;
+	n = *pc++;
+	for(i = 0; i < n; i++) {
+	  if ((sp[i] & 1) == 0) {
+	    ok = 0;
+	    break;
+	  }
+	}
+	if(ok) pc++; else pc += *pc;
+	Next;
+      }
+
+      Instruct (COMPINT31) {
+	/* makes a 31-bit integer out of the accumulator and
+	   the first 30 values of the stack, and puts it in the
+	   accumulator (the accumulator, then the topmost values,
+	   become the higher bits) */
+	print_instr("COMPINT31");
+	int i;
+	/* accu = accu, or accu = (value)((unsigned long)1-accu) if bool
+	   is used for the bits */
+	for(i = 0; i < 30; i++) {
+	  accu = (value)((((uint32)accu-1) << 1) | *sp++);
+	  /* -1 removes the tag bit, << 1 multiplies the value by 2,
+	     | *sp++ pops the next value and adds it (no carry involved);
+	     note that this reintroduces a tag bit */
+	  /* alternative, if bool is used for the bits:
+	     accu = (value)((((unsigned long)accu) << 1) & !*sp++); */
+	}
+	Next;
+      }
+
+      Instruct (DECOMPINT31) {
+	/* builds a block out of a 31-bit integer (from the accumulator);
+	   used before case analyses */
+	int i;
+	value block;
+	print_instr("DECOMPINT31");
+	Alloc_small(block, 31, 1); /* Alloc_small(*, size, tag) */
+	for(i = 30; i >= 0; i--) {
+	  Field(block, i) = (value)(accu & 3); /* the two last bits of the accumulator */
+	  accu = (value)((uint32)accu >> 1) | 1; /* last bit must be a one */
+	}
+	accu = block;
+	Next;
+      }
+
+      /* /spiwack */
+
 
       /* Debugging and machine control */
 
       Instruct(STOP){
diff --git a/kernel/byterun/int64_emul.h b/kernel/byterun/int64_emul.h
new file mode 100644
index 000000000..ba8a60149
--- /dev/null
+++ b/kernel/byterun/int64_emul.h
@@ -0,0 +1,272 @@
+/***********************************************************************/
+/*                                                                     */
+/*                           Objective Caml                            */
+/*                                                                     */
+/*            Xavier Leroy, projet Cristal, INRIA Rocquencourt         */
+/*                                                                     */
+/*  Copyright 2002 Institut National de Recherche en Informatique et   */
+/*  en Automatique.  All rights reserved.  This file is distributed    */
+/*  under the terms of the GNU Library General Public License, with    */
+/*  the special exception on linking described in file ../LICENSE.     */
+/*                                                                     */
+/***********************************************************************/
+
+/* $Id: int64_emul.h,v 1.5 2005/09/22 14:21:50 xleroy Exp $ */
+
+/* Software emulation of 64-bit integer arithmetic, for C compilers
+   that do not support it. */
+
+#ifndef CAML_INT64_EMUL_H
+#define CAML_INT64_EMUL_H
+
+#include <math.h>
+
+#ifdef ARCH_BIG_ENDIAN
+#define I64_literal(hi,lo) { hi, lo }
+#else
+#define I64_literal(hi,lo) { lo, hi }
+#endif
+
+/* Unsigned comparison */
+static int I64_ucompare(uint64 x, uint64 y)
+{
+  if (x.h > y.h) return 1;
+  if (x.h < y.h) return -1;
+  if (x.l > y.l) return 1;
+  if (x.l < y.l) return -1;
+  return 0;
+}
+
+#define I64_ult(x, y) (I64_ucompare(x, y) < 0)
+
+/* Signed comparison */
+static int I64_compare(int64 x, int64 y)
+{
+  if ((int32)x.h > (int32)y.h) return 1;
+  if ((int32)x.h < (int32)y.h) return -1;
+  if (x.l > y.l) return 1;
+  if (x.l < y.l) return -1;
+  return 0;
+}
+
+/* Negation */
+static int64 I64_neg(int64 x)
+{
+  int64 res;
+  res.l = -x.l;
+  res.h = ~x.h;
+  if (res.l == 0) res.h++;
+  return res;
+}
+
+/* Addition */
+static int64 I64_add(int64 x, int64 y)
+{
+  int64 res;
+  res.l = x.l + y.l;
+  res.h = x.h + y.h;
+  if (res.l < x.l) res.h++;
+  return res;
+}
+
+/* Subtraction */
+static int64 I64_sub(int64 x, int64 y)
+{
+  int64 res;
+  res.l = x.l - y.l;
+  res.h = x.h - y.h;
+  if (x.l < y.l) res.h--;
+  return res;
+}
+
+/* Multiplication */
+static int64 I64_mul(int64 x, int64 y)
+{
+  int64 res;
+  uint32 prod00 = (x.l & 0xFFFF) * (y.l & 0xFFFF);
+  uint32 prod10 = (x.l >> 16) * (y.l & 0xFFFF);
+  uint32 prod01 = (x.l & 0xFFFF) * (y.l >> 16);
+  uint32 prod11 = (x.l >> 16) * (y.l >> 16);
+  res.l = prod00;
+  res.h = prod11 + (prod01 >> 16) + (prod10 >> 16);
+  prod01 = prod01 << 16; res.l += prod01; if (res.l < prod01) res.h++;
+  prod10 = prod10 << 16; res.l += prod10; if (res.l < prod10) res.h++;
+  res.h += x.l * y.h + x.h * y.l;
+  return res;
+}
+
+#define I64_is_zero(x) (((x).l | (x).h) == 0)
+
+#define I64_is_negative(x) ((int32) (x).h < 0)
+
+/* Bitwise operations */
+static int64 I64_and(int64 x, int64 y)
+{
+  int64 res;
+  res.l = x.l & y.l;
+  res.h = x.h & y.h;
+  return res;
+}
+
+static int64 I64_or(int64 x, int64 y)
+{
+  int64 res;
+  res.l = x.l | y.l;
+  res.h = x.h | y.h;
+  return res;
+}
+
+static int64 I64_xor(int64 x, int64 y)
+{
+  int64 res;
+  res.l = x.l ^ y.l;
+  res.h = x.h ^ y.h;
+  return res;
+}
+
+/* Shifts */
+static int64 I64_lsl(int64 x, int s)
+{
+  int64 res;
+  s = s & 63;
+  if (s == 0) return x;
+  if (s < 32) {
+    res.l = x.l << s;
+    res.h = (x.h << s) | (x.l >> (32 - s));
+  } else {
+    res.l = 0;
+    res.h = x.l << (s - 32);
+  }
+  return res;
+}
+
+static int64 I64_lsr(int64 x, int s)
+{
+  int64 res;
+  s = s & 63;
+  if (s == 0) return x;
+  if (s < 32) {
+    res.l = (x.l >> s) | (x.h << (32 - s));
+    res.h = x.h >> s;
+  } else {
+    res.l = x.h >> (s - 32);
+    res.h = 0;
+  }
+  return res;
+}
+
+static int64 I64_asr(int64 x, int s)
+{
+  int64 res;
+  s = s & 63;
+  if (s == 0) return x;
+  if (s < 32) {
+    res.l = (x.l >> s) | (x.h << (32 - s));
+    res.h = (int32) x.h >> s;
+  } else {
+    res.l = (int32) x.h >> (s - 32);
+    res.h = (int32) x.h >> 31;
+  }
+  return res;
+}
+
+/* Division and modulus */
+
+#define I64_SHL1(x) x.h = (x.h << 1) | (x.l >> 31); x.l <<= 1
+#define I64_SHR1(x) x.l = (x.l >> 1) | (x.h << 31); x.h >>= 1
+
+static void I64_udivmod(uint64 modulus, uint64 divisor,
+                        uint64 * quo, uint64 * mod)
+{
+  int64 quotient, mask;
+  int cmp;
+
+  quotient.h = 0; quotient.l = 0;
+  mask.h = 0; mask.l = 1;
+  while ((int32) divisor.h >= 0) {
+    cmp = I64_ucompare(divisor, modulus);
+    I64_SHL1(divisor);
+    I64_SHL1(mask);
+    if (cmp >= 0) break;
+  }
+  while (mask.l | mask.h) {
+    if (I64_ucompare(modulus, divisor) >= 0) {
+      quotient.h |= mask.h; quotient.l |= mask.l;
+      modulus = I64_sub(modulus, divisor);
+    }
+    I64_SHR1(mask);
+    I64_SHR1(divisor);
+  }
+  *quo = quotient;
+  *mod = modulus;
+}
+
+static int64 I64_div(int64 x, int64 y)
+{
+  int64 q, r;
+  int32 sign;
+
+  sign = x.h ^ y.h;
+  if ((int32) x.h < 0) x = I64_neg(x);
+  if ((int32) y.h < 0) y = I64_neg(y);
+  I64_udivmod(x, y, &q, &r);
+  if (sign < 0) q = I64_neg(q);
+  return q;
+}
+
+static int64 I64_mod(int64 x, int64 y)
+{
+  int64 q, r;
+  int32 sign;
+
+  sign = x.h;
+  if ((int32) x.h < 0) x = I64_neg(x);
+  if ((int32) y.h < 0) y = I64_neg(y);
+  I64_udivmod(x, y, &q, &r);
+  if (sign < 0) r = I64_neg(r);
+  return r;
+}
+
+/* Coercions */
+
+static int64 I64_of_int32(int32 x)
+{
+  int64 res;
+  res.l = x;
+  res.h = x >> 31;
+  return res;
+}
+
+#define I64_to_int32(x) ((int32) (x).l)
+
+/* Note: we assume sizeof(intnat) = 4 here, which is true; otherwise
+   autoconfiguration would have selected native 64-bit integers. */
+#define I64_of_intnat I64_of_int32
+#define I64_to_intnat I64_to_int32
+
+static double I64_to_double(int64 x)
+{
+  double res;
+  int32 sign = x.h;
+  if (sign < 0) x = I64_neg(x);
+  res = ldexp((double) x.h, 32) + x.l;
+  if (sign < 0) res = -res;
+  return res;
+}
+
+static int64 I64_of_double(double f)
+{
+  int64 res;
+  double frac, integ;
+  int neg;
+
+  neg = (f < 0);
+  f = fabs(f);
+  frac = modf(ldexp(f, -32), &integ);
+  res.h = (uint32) integ;
+  res.l = (uint32) ldexp(frac, 32);
+  if (neg) res = I64_neg(res);
+  return res;
+}
+
+#endif /* CAML_INT64_EMUL_H */
diff --git a/kernel/byterun/int64_native.h b/kernel/byterun/int64_native.h
new file mode 100644
index 000000000..2341e9989
--- /dev/null
+++ b/kernel/byterun/int64_native.h
@@ -0,0 +1,50 @@
+/***********************************************************************/
+/*                                                                     */
+/*                           Objective Caml                            */
+/*                                                                     */
+/*            Xavier Leroy, projet Cristal, INRIA Rocquencourt         */
+/*                                                                     */
+/*  Copyright 2002 Institut National de Recherche en Informatique et   */
+/*  en Automatique.  All rights reserved.  This file is distributed    */
+/*  under the terms of the GNU Library General Public License, with    */
+/*  the special exception on linking described in file ../LICENSE.     */
+/*                                                                     */
+/***********************************************************************/
+
+/* $Id: int64_native.h,v 1.5 2005/09/22 14:21:50 xleroy Exp $ */
+
+/* Wrapper macros around native 64-bit integer arithmetic,
+   so that it has the same interface as the software emulation
+   provided in int64_emul.h */
+
+#ifndef CAML_INT64_NATIVE_H
+#define CAML_INT64_NATIVE_H
+
+#define I64_literal(hi,lo) ((int64)(hi) << 32 | (lo))
+#define I64_compare(x,y) (((x) > (y)) - ((x) < (y)))
+#define I64_ult(x,y) ((uint64)(x) < (uint64)(y))
+#define I64_neg(x) (-(x))
+#define I64_add(x,y) ((x) + (y))
+#define I64_sub(x,y) ((x) - (y))
+#define I64_mul(x,y) ((x) * (y))
+#define I64_is_zero(x) ((x) == 0)
+#define I64_is_negative(x) ((x) < 0)
+#define I64_div(x,y) ((x) / (y))
+#define I64_mod(x,y) ((x) % (y))
+#define I64_udivmod(x,y,quo,rem) \
+  (*(rem) = (uint64)(x) % (uint64)(y), \
+   *(quo) = (uint64)(x) / (uint64)(y))
+#define I64_and(x,y) ((x) & (y))
+#define I64_or(x,y) ((x) | (y))
+#define I64_xor(x,y) ((x) ^ (y))
+#define I64_lsl(x,y) ((x) << (y))
+#define I64_asr(x,y) ((x) >> (y))
+#define I64_lsr(x,y) ((uint64)(x) >> (y))
+#define I64_to_intnat(x) ((intnat) (x))
+#define I64_of_intnat(x) ((intnat) (x))
+#define I64_to_int32(x) ((int32) (x))
+#define I64_of_int32(x) ((int64) (x))
+#define I64_to_double(x) ((double)(x))
+#define I64_of_double(x) ((int64)(x))
+
+#endif /* CAML_INT64_NATIVE_H */
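
Editor's note: all of the int31 instructions above rely on the same tagged encoding: an
int31 x is stored in an OCaml value as 2*x + 1, so the low bit is a tag and the 31 data
bits sit above it in a 32-bit word. The following is a minimal standalone sketch of the
ADDINT31/ADDCINT31 arithmetic, outside the interpreter; the `value` typedef and the two
macros are simplified stand-ins for the runtime's definitions, not the patch's actual
environment.

/* Standalone sketch of the tagged int31 arithmetic used by the VM. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

typedef uintptr_t value; /* stand-in for the OCaml runtime's value type */

#define uint32_of_value(v) ((uint32_t)(v) >> 1)
#define value_of_uint32(i) ((value)(((uint32_t)(i) << 1) | 1))

int main(void)
{
  value x = value_of_uint32(0x7FFFFFFFu); /* largest int31 */
  value y = value_of_uint32(1u);

  /* ADDINT31: (2a+1) + (2b+1) - 1 = 2(a+b) + 1.  The carry out of
     bit 31 is discarded by 32-bit wraparound, so the result is the
     tagged value of (a+b) mod 2^31 on every architecture. */
  value sum = (value)((uint32_t)x + (uint32_t)y - 1);

  /* ADDCINT31: the 32-bit addition wrapped (i.e. the int31 addition
     carried) exactly when the raw sum is smaller than an operand. */
  uint32_t s = (uint32_t)x + (uint32_t)y - 1;
  int carry = s < (uint32_t)x;

  /* prints: sum = 0, carry = 1 */
  printf("sum = %" PRIu32 ", carry = %d\n", uint32_of_value(sum), carry);
  return 0;
}

The same tag manipulation explains the less obvious expressions in the patch: MULCINT31
untags one operand with `^1` so the 64-bit product carries a single factor of 2, and
COMPINT31/DECOMPINT31 shift the tag bit in and out one data bit at a time.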