Diffstat:
-rw-r--r--  .depend                           |   31
-rw-r--r--  LICENSE                           |  176
-rw-r--r--  Makefile                          |   16
-rw-r--r--  arm/PrintAsm.ml                   |   10
-rw-r--r--  arm/PrintOp.ml                    |    2
-rw-r--r--  backend/CMparser.mly              |   10
-rw-r--r--  backend/CMtypecheck.ml            |    2
-rw-r--r--  backend/PrintCminor.ml            |    6
-rw-r--r--  cfrontend/C2C.ml                  |    2
-rw-r--r--  cfrontend/PrintClight.ml          |    2
-rw-r--r--  cfrontend/PrintCsyntax.ml         |    6
-rw-r--r--  common/Memory.v                   |    2
-rwxr-xr-x  coq                               |    2
-rw-r--r--  driver/Interp.ml                  |   13
-rw-r--r--  extraction/extraction.v           |   28
-rw-r--r--  flocq/Appli/Fappli_IEEE.v         | 1617
-rw-r--r--  flocq/Appli/Fappli_IEEE_bits.v    |  546
-rw-r--r--  flocq/Calc/Fcalc_bracket.v        |  688
-rw-r--r--  flocq/Calc/Fcalc_digits.v         |  382
-rw-r--r--  flocq/Calc/Fcalc_div.v            |  165
-rw-r--r--  flocq/Calc/Fcalc_ops.v            |  161
-rw-r--r--  flocq/Calc/Fcalc_round.v          | 1093
-rw-r--r--  flocq/Calc/Fcalc_sqrt.v           |  346
-rw-r--r--  flocq/Core/Fcore.v                |   30
-rw-r--r--  flocq/Core/Fcore_FIX.v            |   87
-rw-r--r--  flocq/Core/Fcore_FLT.v            |  250
-rw-r--r--  flocq/Core/Fcore_FLX.v            |  233
-rw-r--r--  flocq/Core/Fcore_FTZ.v            |  330
-rw-r--r--  flocq/Core/Fcore_Raux.v           | 1996
-rw-r--r--  flocq/Core/Fcore_Zaux.v           |  774
-rw-r--r--  flocq/Core/Fcore_defs.v           |  101
-rw-r--r--  flocq/Core/Fcore_digits.v         |  899
-rw-r--r--  flocq/Core/Fcore_float_prop.v     |  488
-rw-r--r--  flocq/Core/Fcore_generic_fmt.v    | 2232
-rw-r--r--  flocq/Core/Fcore_rnd.v            | 1394
-rw-r--r--  flocq/Core/Fcore_rnd_ne.v         |  531
-rw-r--r--  flocq/Core/Fcore_ulp.v            | 1142
-rw-r--r--  flocq/Flocq_version.v             |   31
-rw-r--r--  flocq/Prop/Fprop_Sterbenz.v       |  169
-rw-r--r--  flocq/Prop/Fprop_div_sqrt_error.v |  299
-rw-r--r--  flocq/Prop/Fprop_mult_error.v     |  237
-rw-r--r--  flocq/Prop/Fprop_plus_error.v     |  234
-rw-r--r--  flocq/Prop/Fprop_relative.v       |  699
-rw-r--r--  ia32/PrintAsm.ml                  |   12
-rw-r--r--  ia32/PrintOp.ml                   |    2
-rw-r--r--  lib/Camlcoq.ml                    |    8
-rw-r--r--  lib/Floataux.ml                   |   51
-rw-r--r--  lib/Floats.v                      |  777
-rwxr-xr-x  pg                                |    2
-rw-r--r--  powerpc/PrintAsm.ml               |   11
-rw-r--r--  powerpc/PrintOp.ml                |    2
-rw-r--r--  test/c/Makefile                   |   10
-rw-r--r--  test/c/Results/floats             |    1
-rw-r--r--  test/c/floats.c                   | 7944
54 files changed, 26114 insertions, 168 deletions
diff --git a/.depend b/.depend
index 5cc392f..df0d874 100644
--- a/.depend
+++ b/.depend
@@ -7,7 +7,7 @@ lib/Lattice.vo lib/Lattice.glob: lib/Lattice.v lib/Coqlib.vo lib/Maps.vo
lib/Ordered.vo lib/Ordered.glob: lib/Ordered.v lib/Coqlib.vo lib/Maps.vo lib/Integers.vo
lib/Iteration.vo lib/Iteration.glob: lib/Iteration.v lib/Axioms.vo lib/Coqlib.vo lib/Wfsimpl.vo
lib/Integers.vo lib/Integers.glob: lib/Integers.v lib/Axioms.vo lib/Coqlib.vo
-lib/Floats.vo lib/Floats.glob: lib/Floats.v lib/Coqlib.vo lib/Integers.vo
+lib/Floats.vo lib/Floats.glob: lib/Floats.v lib/Axioms.vo lib/Coqlib.vo lib/Integers.vo flocq/Appli/Fappli_IEEE.vo flocq/Appli/Fappli_IEEE_bits.vo flocq/Core/Fcore.vo
lib/Parmov.vo lib/Parmov.glob: lib/Parmov.v lib/Axioms.vo lib/Coqlib.vo
lib/UnionFind.vo lib/UnionFind.glob: lib/UnionFind.v lib/Coqlib.vo
lib/Wfsimpl.vo lib/Wfsimpl.glob: lib/Wfsimpl.v lib/Axioms.vo
@@ -52,7 +52,7 @@ backend/Constpropproof.vo backend/Constpropproof.glob: backend/Constpropproof.v
$(ARCH)/CombineOp.vo $(ARCH)/CombineOp.glob: $(ARCH)/CombineOp.v lib/Coqlib.vo common/AST.vo lib/Integers.vo $(ARCH)/Op.vo $(ARCH)/SelectOp.vo
backend/CSE.vo backend/CSE.glob: backend/CSE.v lib/Coqlib.vo lib/Maps.vo common/Errors.vo common/AST.vo lib/Integers.vo lib/Floats.vo common/Values.vo common/Memory.vo common/Globalenvs.vo $(ARCH)/Op.vo backend/Registers.vo backend/RTL.vo backend/RTLtyping.vo backend/Kildall.vo $(ARCH)/CombineOp.vo
$(ARCH)/CombineOpproof.vo $(ARCH)/CombineOpproof.glob: $(ARCH)/CombineOpproof.v lib/Coqlib.vo common/AST.vo lib/Integers.vo common/Values.vo common/Memory.vo $(ARCH)/Op.vo backend/Registers.vo backend/RTL.vo $(ARCH)/CombineOp.vo backend/CSE.vo
-backend/CSEproof.vo backend/CSEproof.glob: backend/CSEproof.v lib/Coqlib.vo lib/Maps.vo common/AST.vo common/Errors.vo lib/Integers.vo lib/Floats.vo common/Values.vo common/Memory.vo common/Events.vo common/Globalenvs.vo common/Smallstep.vo $(ARCH)/Op.vo backend/Registers.vo backend/RTL.vo backend/RTLtyping.vo backend/Kildall.vo backend/CSE.vo
+backend/CSEproof.vo backend/CSEproof.glob: backend/CSEproof.v lib/Coqlib.vo lib/Maps.vo common/AST.vo common/Errors.vo lib/Integers.vo lib/Floats.vo common/Values.vo common/Memory.vo common/Events.vo common/Globalenvs.vo common/Smallstep.vo $(ARCH)/Op.vo backend/Registers.vo backend/RTL.vo backend/RTLtyping.vo backend/Kildall.vo $(ARCH)/CombineOp.vo $(ARCH)/CombineOpproof.vo backend/CSE.vo
$(ARCH)/Machregs.vo $(ARCH)/Machregs.glob: $(ARCH)/Machregs.v lib/Coqlib.vo lib/Maps.vo common/AST.vo
backend/Locations.vo backend/Locations.glob: backend/Locations.v lib/Coqlib.vo lib/Maps.vo common/AST.vo common/Values.vo $(ARCH)/Machregs.vo
$(ARCH)/$(VARIANT)/Conventions1.vo $(ARCH)/$(VARIANT)/Conventions1.glob: $(ARCH)/$(VARIANT)/Conventions1.v lib/Coqlib.vo common/AST.vo backend/Locations.vo
@@ -115,3 +115,30 @@ cfrontend/Cminorgen.vo cfrontend/Cminorgen.glob: cfrontend/Cminorgen.v lib/Coqli
cfrontend/Cminorgenproof.vo cfrontend/Cminorgenproof.glob: cfrontend/Cminorgenproof.v lib/Coqlib.vo lib/Intv.vo common/Errors.vo lib/Maps.vo common/AST.vo lib/Integers.vo lib/Floats.vo common/Values.vo common/Memdata.vo common/Memory.vo common/Events.vo common/Globalenvs.vo common/Smallstep.vo common/Switch.vo cfrontend/Csharpminor.vo backend/Cminor.vo cfrontend/Cminorgen.vo
driver/Compiler.vo driver/Compiler.glob: driver/Compiler.v lib/Axioms.vo lib/Coqlib.vo lib/Maps.vo common/Errors.vo common/AST.vo common/Values.vo common/Smallstep.vo cfrontend/Csyntax.vo cfrontend/Csem.vo cfrontend/Cstrategy.vo cfrontend/Cexec.vo cfrontend/Clight.vo cfrontend/Csharpminor.vo backend/Cminor.vo backend/CminorSel.vo backend/RTL.vo backend/LTL.vo backend/LTLin.vo backend/Linear.vo backend/Mach.vo backend/Machsem.vo $(ARCH)/Asm.vo cfrontend/Initializers.vo cfrontend/SimplExpr.vo cfrontend/Cshmgen.vo cfrontend/Cminorgen.vo backend/Selection.vo backend/RTLgen.vo backend/Tailcall.vo backend/Inlining.vo backend/Renumber.vo backend/Constprop.vo backend/CSE.vo backend/Allocation.vo backend/Tunneling.vo backend/Linearize.vo backend/CleanupLabels.vo backend/Reload.vo backend/RRE.vo backend/Stacking.vo $(ARCH)/Asmgen.vo backend/RTLtyping.vo backend/LTLtyping.vo backend/LTLintyping.vo backend/Lineartyping.vo backend/Machtyping.vo cfrontend/SimplExprproof.vo cfrontend/Cshmgenproof.vo cfrontend/Cminorgenproof.vo backend/Selectionproof.vo backend/RTLgenproof.vo backend/Tailcallproof.vo backend/Inliningproof.vo backend/Renumberproof.vo backend/Constpropproof.vo backend/CSEproof.vo backend/Allocproof.vo backend/Alloctyping.vo backend/Tunnelingproof.vo backend/Tunnelingtyping.vo backend/Linearizeproof.vo backend/Linearizetyping.vo backend/CleanupLabelsproof.vo backend/CleanupLabelstyping.vo backend/Reloadproof.vo backend/Reloadtyping.vo backend/RREproof.vo backend/RREtyping.vo backend/Stackingproof.vo backend/Stackingtyping.vo $(ARCH)/Asmgenproof.vo
driver/Complements.vo driver/Complements.glob: driver/Complements.v lib/Coqlib.vo common/AST.vo lib/Integers.vo common/Values.vo common/Events.vo common/Globalenvs.vo common/Smallstep.vo common/Behaviors.vo cfrontend/Csyntax.vo cfrontend/Csem.vo cfrontend/Cstrategy.vo cfrontend/Clight.vo backend/Cminor.vo backend/RTL.vo $(ARCH)/Asm.vo driver/Compiler.vo common/Errors.vo
+flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_float_prop.glob: flocq/Core/Fcore_float_prop.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo
+flocq/Core/Fcore_Zaux.vo flocq/Core/Fcore_Zaux.glob: flocq/Core/Fcore_Zaux.v
+flocq/Core/Fcore_rnd_ne.vo flocq/Core/Fcore_rnd_ne.glob: flocq/Core/Fcore_rnd_ne.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_ulp.vo
+flocq/Core/Fcore_FTZ.vo flocq/Core/Fcore_FTZ.glob: flocq/Core/Fcore_FTZ.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_FLX.vo
+flocq/Core/Fcore_FLT.vo flocq/Core/Fcore_FLT.glob: flocq/Core/Fcore_FLT.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_FLX.vo flocq/Core/Fcore_FIX.vo flocq/Core/Fcore_rnd_ne.vo
+flocq/Core/Fcore_defs.vo flocq/Core/Fcore_defs.glob: flocq/Core/Fcore_defs.v flocq/Core/Fcore_Raux.vo
+flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_Raux.glob: flocq/Core/Fcore_Raux.v flocq/Core/Fcore_Zaux.vo
+flocq/Core/Fcore_ulp.vo flocq/Core/Fcore_ulp.glob: flocq/Core/Fcore_ulp.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_float_prop.vo
+flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_rnd.glob: flocq/Core/Fcore_rnd.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo
+flocq/Core/Fcore_FLX.vo flocq/Core/Fcore_FLX.glob: flocq/Core/Fcore_FLX.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_FIX.vo flocq/Core/Fcore_rnd_ne.vo
+flocq/Core/Fcore_FIX.vo flocq/Core/Fcore_FIX.glob: flocq/Core/Fcore_FIX.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_rnd_ne.vo
+flocq/Core/Fcore_digits.vo flocq/Core/Fcore_digits.glob: flocq/Core/Fcore_digits.v flocq/Core/Fcore_Zaux.vo
+flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_generic_fmt.glob: flocq/Core/Fcore_generic_fmt.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_float_prop.vo
+flocq/Core/Fcore.vo flocq/Core/Fcore.glob: flocq/Core/Fcore.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_rnd.vo flocq/Core/Fcore_generic_fmt.vo flocq/Core/Fcore_rnd_ne.vo flocq/Core/Fcore_FIX.vo flocq/Core/Fcore_FLX.vo flocq/Core/Fcore_FLT.vo flocq/Core/Fcore_ulp.vo
+flocq/Prop/Fprop_Sterbenz.vo flocq/Prop/Fprop_Sterbenz.glob: flocq/Prop/Fprop_Sterbenz.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_generic_fmt.vo flocq/Calc/Fcalc_ops.vo
+flocq/Prop/Fprop_mult_error.vo flocq/Prop/Fprop_mult_error.glob: flocq/Prop/Fprop_mult_error.v flocq/Core/Fcore.vo flocq/Calc/Fcalc_ops.vo
+flocq/Prop/Fprop_relative.vo flocq/Prop/Fprop_relative.glob: flocq/Prop/Fprop_relative.v flocq/Core/Fcore.vo
+flocq/Prop/Fprop_div_sqrt_error.vo flocq/Prop/Fprop_div_sqrt_error.glob: flocq/Prop/Fprop_div_sqrt_error.v flocq/Core/Fcore.vo flocq/Calc/Fcalc_ops.vo flocq/Prop/Fprop_relative.vo
+flocq/Prop/Fprop_plus_error.vo flocq/Prop/Fprop_plus_error.glob: flocq/Prop/Fprop_plus_error.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_generic_fmt.vo flocq/Calc/Fcalc_ops.vo
+flocq/Calc/Fcalc_ops.vo flocq/Calc/Fcalc_ops.glob: flocq/Calc/Fcalc_ops.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_float_prop.vo
+flocq/Calc/Fcalc_bracket.vo flocq/Calc/Fcalc_bracket.glob: flocq/Calc/Fcalc_bracket.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_float_prop.vo
+flocq/Calc/Fcalc_sqrt.vo flocq/Calc/Fcalc_sqrt.glob: flocq/Calc/Fcalc_sqrt.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_digits.vo flocq/Core/Fcore_float_prop.vo flocq/Calc/Fcalc_bracket.vo flocq/Calc/Fcalc_digits.vo
+flocq/Calc/Fcalc_div.vo flocq/Calc/Fcalc_div.glob: flocq/Calc/Fcalc_div.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_digits.vo flocq/Calc/Fcalc_bracket.vo flocq/Calc/Fcalc_digits.vo
+flocq/Calc/Fcalc_round.vo flocq/Calc/Fcalc_round.glob: flocq/Calc/Fcalc_round.v flocq/Core/Fcore.vo flocq/Core/Fcore_digits.vo flocq/Calc/Fcalc_bracket.vo flocq/Calc/Fcalc_digits.vo
+flocq/Calc/Fcalc_digits.vo flocq/Calc/Fcalc_digits.glob: flocq/Calc/Fcalc_digits.v flocq/Core/Fcore_Raux.vo flocq/Core/Fcore_defs.vo flocq/Core/Fcore_float_prop.vo flocq/Core/Fcore_digits.vo
+flocq/Appli/Fappli_IEEE_bits.vo flocq/Appli/Fappli_IEEE_bits.glob: flocq/Appli/Fappli_IEEE_bits.v flocq/Core/Fcore.vo flocq/Core/Fcore_digits.vo flocq/Calc/Fcalc_digits.vo flocq/Appli/Fappli_IEEE.vo
+flocq/Appli/Fappli_IEEE.vo flocq/Appli/Fappli_IEEE.glob: flocq/Appli/Fappli_IEEE.v flocq/Core/Fcore.vo flocq/Core/Fcore_digits.vo flocq/Calc/Fcalc_digits.vo flocq/Calc/Fcalc_round.vo flocq/Calc/Fcalc_bracket.vo flocq/Calc/Fcalc_ops.vo flocq/Calc/Fcalc_div.vo flocq/Calc/Fcalc_sqrt.vo flocq/Prop/Fprop_relative.vo
diff --git a/LICENSE b/LICENSE
index f09cc89..49f5923 100644
--- a/LICENSE
+++ b/LICENSE
@@ -29,7 +29,6 @@ option) any later version:
cfrontend/PrintCsyntax.ml lib/Maps.v
cfrontend/PrintClight.ml lib/Parmov.v
lib/Camlcoq.ml
- lib/Floataux.ml
all files in the runtime/ directory
all files in the cparser/ directory
@@ -43,6 +42,13 @@ files are free software and can be used both in commercial and
non-commercial contexts, subject to the terms of the GNU General
Public License.
+The files contained in the flocq/ directory and its subdirectories are
+taken from the Flocq project, http://flocq.gforge.inria.fr/ These
+files are Copyright 2010-2012 INRIA and distributed under the terms of
+the GNU Lesser General Public Licence, either version 3 of the
+licence, or (at your option) any later version. A copy of the
+GNU Lesser General Public Licence version 3 is included below.
+
Finally, the following files are taken from the CIL library:
cparser/Cabs.ml
cparser/Lexer.mli
@@ -514,3 +520,171 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/Makefile b/Makefile
index 793a4d4..175f773 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,8 @@
include Makefile.config
-DIRS=lib common $(ARCH)/$(VARIANT) $(ARCH) backend cfrontend driver
+DIRS=lib common $(ARCH)/$(VARIANT) $(ARCH) backend cfrontend driver \
+ flocq/Core flocq/Prop flocq/Calc flocq/Appli
INCLUDES=$(patsubst %,-I %, $(DIRS))
@@ -36,6 +37,17 @@ OCB_OPTIONS_CHECKLINK=\
VPATH=$(DIRS)
GPATH=$(DIRS)
+# Flocq
+FLOCQ_CORE=Fcore_float_prop.v Fcore_Zaux.v Fcore_rnd_ne.v Fcore_FTZ.v \
+ Fcore_FLT.v Fcore_defs.v Fcore_Raux.v Fcore_ulp.v Fcore_rnd.v Fcore_FLX.v \
+ Fcore_FIX.v Fcore_digits.v Fcore_generic_fmt.v Fcore.v
+FLOCQ_PROP=Fprop_Sterbenz.v Fprop_mult_error.v Fprop_relative.v \
+ Fprop_div_sqrt_error.v Fprop_plus_error.v
+FLOCQ_CALC=Fcalc_ops.v Fcalc_bracket.v Fcalc_sqrt.v Fcalc_div.v Fcalc_round.v \
+ Fcalc_digits.v
+FLOCQ_APPLI=Fappli_IEEE_bits.v Fappli_IEEE.v
+FLOCQ=$(FLOCQ_CORE) $(FLOCQ_PROP) $(FLOCQ_CALC) $(FLOCQ_APPLI)
+
# General-purpose libraries (in lib/)
LIB=Axioms.v Coqlib.v Intv.v Maps.v Heaps.v Lattice.v Ordered.v \
@@ -90,7 +102,7 @@ DRIVER=Compiler.v Complements.v
# All source files
-FILES=$(LIB) $(COMMON) $(BACKEND) $(CFRONTEND) $(DRIVER)
+FILES=$(LIB) $(COMMON) $(BACKEND) $(CFRONTEND) $(DRIVER) $(FLOCQ)
# Symbolic links vs. copy
diff --git a/arm/PrintAsm.ml b/arm/PrintAsm.ml
index 1f053df..161c760 100644
--- a/arm/PrintAsm.ml
+++ b/arm/PrintAsm.ml
@@ -137,7 +137,7 @@ let distance_to_emit_constants () =
let float_labels = (Hashtbl.create 39 : (int64, int) Hashtbl.t)
let label_float f =
- let bf = Int64.bits_of_float f in
+ let bf = camlint64_of_coqint(Floats.Float.bits_of_double f) in
try
Hashtbl.find float_labels bf
with Not_found ->
@@ -563,7 +563,7 @@ let print_instruction oc = function
(* We could make good use of the fconstd instruction, but it's available
in VFD v3 and up, not in v1 nor v2 *)
let lbl = label_float f in
- fprintf oc " fldd %a, .L%d @ %.12g\n" freg r1 lbl f; 1
+ fprintf oc " fldd %a, .L%d @ %.12g\n" freg r1 lbl (camlfloat_of_coqfloat f); 1
| Pfcmpd(r1, r2) ->
fprintf oc " fcmpd %a, %a\n" freg r1 freg r2;
fprintf oc " fmstat\n"; 2
@@ -707,9 +707,11 @@ let print_init oc = function
| Init_int32 n ->
fprintf oc " .word %ld\n" (camlint_of_coqint n)
| Init_float32 n ->
- fprintf oc " .word 0x%lx %s %.15g \n" (Int32.bits_of_float n) comment n
+ fprintf oc " .word 0x%lx %s %.15g \n" (camlint_of_coqint (Floats.Float.bits_of_single n))
+ comment (camlfloat_of_coqfloat n)
| Init_float64 n ->
- fprintf oc " .quad %Ld %s %.18g\n" (Int64.bits_of_float n) comment n
+ fprintf oc " .quad %Ld %s %.18g\n" (camlint64_of_coqint (Floats.Float.bits_of_double n))
+ comment (camlfloat_of_coqfloat n)
| Init_space n ->
let n = camlint_of_z n in
if n > 0l then fprintf oc " .space %ld\n" n
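
The change above is representative of the float-constant handling in all three assembly printers (the ia32 and powerpc printers receive the same kind of edit, see the diffstat): the IEEE-754 bit pattern of a constant is no longer read off a host float with Int64.bits_of_float, but obtained from the verified Floats module and converted back to a native integer. A minimal OCaml sketch of that direction, using only functions that appear in this diff; the helper name emit_float64 is invented for illustration, and the conversion helpers are assumed to live in lib/Camlcoq.ml (whose 8-line change is not shown here):

    (* Sketch: emit a 64-bit float constant the way the new printer does.
       Floats.Float.bits_of_double yields the IEEE-754 bit pattern as a Coq
       integer, which camlint64_of_coqint turns into a native int64;
       camlfloat_of_coqfloat recovers a host float for the human-readable
       comment. *)
    open Printf
    open Camlcoq

    let emit_float64 oc f =
      let bits = camlint64_of_coqint (Floats.Float.bits_of_double f) in
      fprintf oc "        .quad %Ld @ %.18g\n" bits (camlfloat_of_coqfloat f)
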
diff --git a/arm/PrintOp.ml b/arm/PrintOp.ml
index 8f21cd4..b5a8d75 100644
--- a/arm/PrintOp.ml
+++ b/arm/PrintOp.ml
@@ -58,7 +58,7 @@ let print_condition reg pp = function
let print_operation reg pp = function
| Omove, [r1] -> reg pp r1
| Ointconst n, [] -> fprintf pp "%ld" (camlint_of_coqint n)
- | Ofloatconst n, [] -> fprintf pp "%F" n
+ | Ofloatconst n, [] -> fprintf pp "%F" (camlfloat_of_coqfloat n)
| Oaddrsymbol(id, ofs), [] ->
fprintf pp "\"%s\" + %ld" (extern_atom id) (camlint_of_coqint ofs)
| Oaddrstack ofs, [] ->
diff --git a/backend/CMparser.mly b/backend/CMparser.mly
index 37b6144..4a91a01 100644
--- a/backend/CMparser.mly
+++ b/backend/CMparser.mly
@@ -368,10 +368,10 @@ init_data:
| INT32 INTLIT { Init_int32 (coqint_of_camlint $2) }
| INT INTLIT { Init_int32 (coqint_of_camlint $2) }
| INTLIT { Init_int32 (coqint_of_camlint $1) }
- | FLOAT32 FLOATLIT { Init_float32 ($2) }
- | FLOAT64 FLOATLIT { Init_float64 ($2) }
- | FLOAT FLOATLIT { Init_float64 ($2) }
- | FLOATLIT { Init_float64 ($1) }
+ | FLOAT32 FLOATLIT { Init_float32 (coqfloat_of_camlfloat $2) }
+ | FLOAT64 FLOATLIT { Init_float64 (coqfloat_of_camlfloat $2) }
+ | FLOAT FLOATLIT { Init_float64 (coqfloat_of_camlfloat $2) }
+ | FLOATLIT { Init_float64 (coqfloat_of_camlfloat $1) }
| LBRACKET INTLIT RBRACKET { Init_space (z_of_camlint $2) }
| INTLIT LPAREN STRINGLIT RPAREN { Init_addrof ($3, coqint_of_camlint $1) }
;
@@ -491,7 +491,7 @@ expr:
LPAREN expr RPAREN { $2 }
| IDENT { Rvar $1 }
| INTLIT { intconst $1 }
- | FLOATLIT { Rconst(Ofloatconst $1) }
+ | FLOATLIT { Rconst(Ofloatconst (coqfloat_of_camlfloat $1)) }
| STRINGLIT { Rconst(Oaddrsymbol($1, Int.zero)) }
| AMPERSAND INTLIT { Rconst(Oaddrstack(coqint_of_camlint $2)) }
| MINUS expr %prec p_uminus { Runop(Onegint, $2) }
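
The grammar actions above go in the opposite direction: a FLOATLIT token carries a host OCaml float, which must be injected into the Coq-level Floats.float before it can be stored in the AST. A small sketch of that pattern; init_of_float_literal is an invented name, and coqfloat_of_camlfloat is assumed to be one of the new conversion helpers in lib/Camlcoq.ml:

    (* Sketch: what the FLOAT64 FLOATLIT action now builds. *)
    open AST
    open Camlcoq

    let init_of_float_literal (x : float) : init_data =
      Init_float64 (coqfloat_of_camlfloat x)
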
diff --git a/backend/CMtypecheck.ml b/backend/CMtypecheck.ml
index 244a73f..8db488a 100644
--- a/backend/CMtypecheck.ml
+++ b/backend/CMtypecheck.ml
@@ -125,7 +125,7 @@ let type_binary_operation = function
let name_of_constant = function
| Ointconst n -> sprintf "intconst %ld" (camlint_of_coqint n)
- | Ofloatconst n -> sprintf "floatconst %g" n
+ | Ofloatconst n -> sprintf "floatconst %g" (camlfloat_of_coqfloat n)
| Oaddrsymbol (s, ofs) -> sprintf "addrsymbol %s %ld" (extern_atom s) (camlint_of_coqint ofs)
| Oaddrstack n -> sprintf "addrstack %ld" (camlint_of_coqint n)
diff --git a/backend/PrintCminor.ml b/backend/PrintCminor.ml
index f355760..cdbd870 100644
--- a/backend/PrintCminor.ml
+++ b/backend/PrintCminor.ml
@@ -125,7 +125,7 @@ let rec expr p (prec, e) =
| Econst(Ointconst n) ->
fprintf p "%ld" (camlint_of_coqint n)
| Econst(Ofloatconst f) ->
- fprintf p "%F" f
+ fprintf p "%F" (camlfloat_of_coqfloat f)
| Econst(Oaddrsymbol(id, ofs)) ->
let ofs = camlint_of_coqint ofs in
if ofs = 0l
@@ -288,8 +288,8 @@ let print_init_data p = function
| Init_int8 i -> fprintf p "int8 %ld" (camlint_of_coqint i)
| Init_int16 i -> fprintf p "int16 %ld" (camlint_of_coqint i)
| Init_int32 i -> fprintf p "%ld" (camlint_of_coqint i)
- | Init_float32 f -> fprintf p "float32 %F" f
- | Init_float64 f -> fprintf p "%F" f
+ | Init_float32 f -> fprintf p "float32 %F" (camlfloat_of_coqfloat f)
+ | Init_float64 f -> fprintf p "%F" (camlfloat_of_coqfloat f)
| Init_space i -> fprintf p "[%ld]" (camlint_of_coqint i)
| Init_addrof(id,off) -> fprintf p "%ld(\"%s\")" (camlint_of_coqint off) (extern_atom id)
diff --git a/cfrontend/C2C.ml b/cfrontend/C2C.ml
index 0d29a23..831f92f 100644
--- a/cfrontend/C2C.ml
+++ b/cfrontend/C2C.ml
@@ -392,7 +392,7 @@ let rec convertExpr env e =
| C.EConst(C.CFloat(f, k, _)) ->
if k = C.FLongDouble then
unsupported "'long double' floating-point literal";
- Eval(Vfloat(f), ty)
+ Eval(Vfloat(coqfloat_of_camlfloat f), ty)
| C.EConst(C.CStr s) ->
let ty = typeStringLiteral s in
Evalof(Evar(name_for_string_literal env s, ty), ty)
diff --git a/cfrontend/PrintClight.ml b/cfrontend/PrintClight.ml
index 1bd21da..feb07fa 100644
--- a/cfrontend/PrintClight.ml
+++ b/cfrontend/PrintClight.ml
@@ -90,7 +90,7 @@ let rec expr p (prec, e) =
| Econst_int(n, _) ->
fprintf p "%ld" (camlint_of_coqint n)
| Econst_float(f, _) ->
- fprintf p "%F" f
+ fprintf p "%F" (camlfloat_of_coqfloat f)
| Eunop(op, a1, _) ->
fprintf p "%s%a" (name_unop op) expr (prec', a1)
| Eaddrof(a1, _) ->
diff --git a/cfrontend/PrintCsyntax.ml b/cfrontend/PrintCsyntax.ml
index 3880188..ae25e17 100644
--- a/cfrontend/PrintCsyntax.ml
+++ b/cfrontend/PrintCsyntax.ml
@@ -175,7 +175,7 @@ let print_pointer_hook
let print_value p v =
match v with
| Vint n -> fprintf p "%ld" (camlint_of_coqint n)
- | Vfloat f -> fprintf p "%F" f
+ | Vfloat f -> fprintf p "%F" (camlfloat_of_coqfloat f)
| Vptr(b, ofs) -> fprintf p "<ptr%a>" !print_pointer_hook (b, ofs)
| Vundef -> fprintf p "<undef>"
@@ -383,8 +383,8 @@ let print_init p = function
| Init_int8 n -> fprintf p "%ld,@ " (camlint_of_coqint n)
| Init_int16 n -> fprintf p "%ld,@ " (camlint_of_coqint n)
| Init_int32 n -> fprintf p "%ld,@ " (camlint_of_coqint n)
- | Init_float32 n -> fprintf p "%F,@ " n
- | Init_float64 n -> fprintf p "%F,@ " n
+ | Init_float32 n -> fprintf p "%F,@ " (camlfloat_of_coqfloat n)
+ | Init_float64 n -> fprintf p "%F,@ " (camlfloat_of_coqfloat n)
| Init_space n -> fprintf p "/* skip %ld, */@ " (camlint_of_coqint n)
| Init_addrof(symb, ofs) ->
let ofs = camlint_of_coqint ofs in
diff --git a/common/Memory.v b/common/Memory.v
index 49dcd7b..f9b322e 100644
--- a/common/Memory.v
+++ b/common/Memory.v
@@ -1970,7 +1970,7 @@ Proof.
destruct (zlt ofs0 lo). eapply perm_drop_3; eauto.
destruct (zle hi ofs0). eapply perm_drop_3; eauto.
apply perm_implies with p. eapply perm_drop_1; eauto. omega.
- generalize (size_chunk_pos chunk); intros. intuition. omegaContradiction. omegaContradiction.
+ generalize (size_chunk_pos chunk); intros. intuition.
eapply perm_drop_3; eauto.
Qed.
diff --git a/coq b/coq
index 53ed140..ce3e1ed 100755
--- a/coq
+++ b/coq
@@ -13,5 +13,5 @@ make -q ${1}o || {
done)
}
-coqide -I lib -I common -I $ARCH/$VARIANT -I $ARCH -I backend -I cfrontend $1 \
+coqide -I lib -I common -I $ARCH/$VARIANT -I $ARCH -I backend -I cfrontend -I flocq -I flocq/Appli -I flocq/Calc -I flocq/Core -I flocq/Prop $1 \
&& make ${1}o
diff --git a/driver/Interp.ml b/driver/Interp.ml
index a74d4d8..0c19673 100644
--- a/driver/Interp.ml
+++ b/driver/Interp.ml
@@ -12,6 +12,8 @@
(* Interpreting CompCert C sources *)
+type caml_float = float
+
open Format
open Camlcoq
open Datatypes
@@ -48,7 +50,7 @@ let print_id_ofs p (id, ofs) =
let print_eventval p = function
| EVint n -> fprintf p "%ld" (camlint_of_coqint n)
- | EVfloat f -> fprintf p "%F" f
+ | EVfloat f -> fprintf p "%F" (camlfloat_of_coqfloat f)
| EVptr_global(id, ofs) -> fprintf p "&%a" print_id_ofs (id, ofs)
let print_eventval_list p = function
@@ -297,7 +299,7 @@ let extract_string ge m id ofs =
let re_conversion = Str.regexp
"%[-+0# ]*[0-9]*\\(\\.[0-9]*\\)?\\([lhjzt]\\|hh\\)?\\([aAcdeEfgGinopsuxX%]\\)"
-external format_float: string -> float -> string
+external format_float: string -> caml_float -> string
= "caml_format_float"
external format_int32: string -> int32 -> string
= "caml_int32_format"
@@ -332,7 +334,7 @@ let do_printf ge m fmt args =
Buffer.add_string b (format_int32 pat (camlint_of_coqint i));
scan pos' args'
| EVfloat f :: args', ('f'|'e'|'E'|'g'|'G'|'a') ->
- Buffer.add_string b (format_float pat f);
+ Buffer.add_string b (format_float pat (camlfloat_of_coqfloat f));
scan pos' args'
| EVptr_global(id, ofs) :: args', 's' ->
Buffer.add_string b
@@ -372,8 +374,9 @@ and world_vload chunk id ofs =
| Mint16signed -> EVint(coqint_of_camlint(Int32.sub (Random.int32 0x10000l) 0x8000l))
| Mint16unsigned -> EVint(coqint_of_camlint(Random.int32 0x10000l))
| Mint32 -> EVint(coqint_of_camlint(Random.int32 0x7FFF_FFFFl))
- | Mfloat32 -> EVfloat(Floats.Float.singleoffloat(Random.float 1.0))
- | Mfloat64 -> EVfloat(Random.float 1.0)
+ | Mfloat32 -> EVfloat(
+ Floats.Float.singleoffloat(coqfloat_of_camlfloat(Random.float 1.0)))
+ | Mfloat64 -> EVfloat(coqfloat_of_camlfloat(Random.float 1.0))
in Some(res, world)
and world_vstore chunk id ofs v =
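
The type alias at the top of Interp.ml exists because the name float can be shadowed once the extracted development is opened: after this commit, Floats.float is a real Coq type rather than an axiomatized type mapped to the native float, so the external binding to the runtime primitive caml_format_float has to name OCaml's built-in float through an alias taken before the open directives. A sketch of the pattern, assuming for illustration that one of the opened modules (here Floats) exports a type named float:

    type caml_float = float            (* still OCaml's native float *)

    open Camlcoq
    open Floats                        (* illustrative: may shadow the name float *)

    external format_float : string -> caml_float -> string = "caml_format_float"

    let show_coq_float (f : Floats.float) : string =
      format_float "%.15g" (camlfloat_of_coqfloat f)
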
diff --git a/extraction/extraction.v b/extraction/extraction.v
index 1156d04..81cf53b 100644
--- a/extraction/extraction.v
+++ b/extraction/extraction.v
@@ -28,27 +28,6 @@ Require Import ExtrOcamlString.
(* Wfsimpl *)
Extraction Inline Wfsimpl.Fix Wfsimpl.Fixm.
-(* Float *)
-Extract Inlined Constant Floats.float => "float".
-Extract Constant Floats.Float.zero => "0.".
-Extract Constant Floats.Float.neg => "( ~-. )".
-Extract Constant Floats.Float.abs => "abs_float".
-Extract Constant Floats.Float.singleoffloat => "Floataux.singleoffloat".
-Extract Constant Floats.Float.intoffloat => "Floataux.intoffloat".
-Extract Constant Floats.Float.intuoffloat => "Floataux.intuoffloat".
-Extract Constant Floats.Float.floatofint => "Floataux.floatofint".
-Extract Constant Floats.Float.floatofintu => "Floataux.floatofintu".
-Extract Constant Floats.Float.add => "( +. )".
-Extract Constant Floats.Float.sub => "( -. )".
-Extract Constant Floats.Float.mul => "( *. )".
-Extract Constant Floats.Float.div => "( /. )".
-Extract Constant Floats.Float.cmp => "Floataux.cmp".
-Extract Constant Floats.Float.eq_dec => "fun (x: float) (y: float) -> x = y".
-Extract Constant Floats.Float.bits_of_double => "Floataux.bits_of_double".
-Extract Constant Floats.Float.double_of_bits => "Floataux.double_of_bits".
-Extract Constant Floats.Float.bits_of_single => "Floataux.bits_of_single".
-Extract Constant Floats.Float.single_of_bits => "Floataux.single_of_bits".
-
(* Memdata *)
Extract Constant Memdata.big_endian => "Memdataaux.big_endian".
@@ -112,6 +91,13 @@ Load extractionMachdep.
(* Avoid name clashes *)
Extraction Blacklist List String Int.
+(* Cutting the dependency on R. *)
+Extract Inlined Constant Fcore_defs.F2R => "fun _ -> assert false".
+Extract Inlined Constant Fappli_IEEE.FF2R => "fun _ -> assert false".
+Extract Inlined Constant Fappli_IEEE.B2R => "fun _ -> assert false".
+Extract Inlined Constant Fappli_IEEE.round_mode => "fun _ -> assert false".
+Extract Inlined Constant Fcalc_bracket.inbetween_loc => "fun _ -> assert false".
+
(* Go! *)
Cd "extraction".
Recursive Extraction Library Compiler.
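
Two things happen in extraction.v: the Extract Constant directives that realized the Floats.Float operations with native OCaml floats disappear, since those operations are now ordinary Coq functions built on Flocq, and the handful of functions that involve real numbers are stubbed out with "fun _ -> assert false" so that the extracted code no longer depends on the Reals library (they are never meant to be executed). As a rough illustration only, not generated output, the extracted OCaml then contains the constructive float representation declared in Fappli_IEEE.v below, with the proof argument of B754_finite erased:

    (* Shape of the extracted type (illustration; the actual files are
       produced by "Recursive Extraction Library Compiler"). *)
    type binary_float =
      | B754_zero of bool
      | B754_infinity of bool
      | B754_nan
      | B754_finite of bool * positive * z  (* the [bounded m e = true] proof is erased *)

    (* F2R, B2R, FF2R, round_mode and inbetween_loc are inlined as
       "fun _ -> assert false": dead code whose only purpose is to sever
       the link to R. *)
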
diff --git a/flocq/Appli/Fappli_IEEE.v b/flocq/Appli/Fappli_IEEE.v
new file mode 100644
index 0000000..79f6a4c
--- /dev/null
+++ b/flocq/Appli/Fappli_IEEE.v
@@ -0,0 +1,1617 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * IEEE-754 arithmetic *)
+Require Import Fcore.
+Require Import Fcore_digits.
+Require Import Fcalc_digits.
+Require Import Fcalc_round.
+Require Import Fcalc_bracket.
+Require Import Fcalc_ops.
+Require Import Fcalc_div.
+Require Import Fcalc_sqrt.
+Require Import Fprop_relative.
+
+Section AnyRadix.
+
+Inductive full_float :=
+ | F754_zero : bool -> full_float
+ | F754_infinity : bool -> full_float
+ | F754_nan : full_float
+ | F754_finite : bool -> positive -> Z -> full_float.
+
+Definition FF2R beta x :=
+ match x with
+ | F754_finite s m e => F2R (Float beta (cond_Zopp s (Zpos m)) e)
+ | _ => R0
+ end.
+
+End AnyRadix.
+
+Section Binary.
+
+(** prec is the number of bits of the mantissa including the implicit one
+ emax is the exponent of the infinities
+ Typically p=24 and emax = 128 in single precision *)
+Variable prec emax : Z.
+Context (prec_gt_0_ : Prec_gt_0 prec).
+Hypothesis Hmax : (prec < emax)%Z.
+
+Let emin := (3 - emax - prec)%Z.
+Let fexp := FLT_exp emin prec.
+Instance fexp_correct : Valid_exp fexp := FLT_exp_valid emin prec.
+Instance fexp_monotone : Monotone_exp fexp := FLT_exp_monotone emin prec.
+
+Definition canonic_mantissa m e :=
+ Zeq_bool (fexp (Z_of_nat (S (digits2_Pnat m)) + e)) e.
+
+Definition bounded m e :=
+ andb (canonic_mantissa m e) (Zle_bool e (emax - prec)).
+
+Definition valid_binary x :=
+ match x with
+ | F754_finite _ m e => bounded m e
+ | _ => true
+ end.
+
+(** Basic type used for representing binary FP numbers.
+ Note that there is exactly one such object per FP datum.
+ NaNs do not have any payload. They cannot be distinguished. *)
+Inductive binary_float :=
+ | B754_zero : bool -> binary_float
+ | B754_infinity : bool -> binary_float
+ | B754_nan : binary_float
+ | B754_finite : bool ->
+ forall (m : positive) (e : Z), bounded m e = true -> binary_float.
+
+Definition FF2B x :=
+ match x as x return valid_binary x = true -> binary_float with
+ | F754_finite s m e => B754_finite s m e
+ | F754_infinity s => fun _ => B754_infinity s
+ | F754_zero s => fun _ => B754_zero s
+ | F754_nan => fun _ => B754_nan
+ end.
+
+Definition B2FF x :=
+ match x with
+ | B754_finite s m e _ => F754_finite s m e
+ | B754_infinity s => F754_infinity s
+ | B754_zero s => F754_zero s
+ | B754_nan => F754_nan
+ end.
+
+Definition radix2 := Build_radix 2 (refl_equal true).
+
+Definition B2R f :=
+ match f with
+ | B754_finite s m e _ => F2R (Float radix2 (cond_Zopp s (Zpos m)) e)
+ | _ => R0
+ end.
+
+Theorem FF2R_B2FF :
+ forall x,
+ FF2R radix2 (B2FF x) = B2R x.
+Proof.
+now intros [sx|sx| |sx mx ex Hx].
+Qed.
+
+Theorem B2FF_FF2B :
+ forall x Hx,
+ B2FF (FF2B x Hx) = x.
+Proof.
+now intros [sx|sx| |sx mx ex] Hx.
+Qed.
+
+Theorem valid_binary_B2FF :
+ forall x,
+ valid_binary (B2FF x) = true.
+Proof.
+now intros [sx|sx| |sx mx ex Hx].
+Qed.
+
+Theorem FF2B_B2FF :
+ forall x H,
+ FF2B (B2FF x) H = x.
+Proof.
+intros [sx|sx| |sx mx ex Hx] H ; try easy.
+apply f_equal.
+apply eqbool_irrelevance.
+Qed.
+
+Theorem FF2B_B2FF_valid :
+ forall x,
+ FF2B (B2FF x) (valid_binary_B2FF x) = x.
+Proof.
+intros x.
+apply FF2B_B2FF.
+Qed.
+
+Theorem B2R_FF2B :
+ forall x Hx,
+ B2R (FF2B x Hx) = FF2R radix2 x.
+Proof.
+now intros [sx|sx| |sx mx ex] Hx.
+Qed.
+
+Theorem match_FF2B :
+ forall {T} fz fi fn ff x Hx,
+ match FF2B x Hx return T with
+ | B754_zero sx => fz sx
+ | B754_infinity sx => fi sx
+ | B754_nan => fn
+ | B754_finite sx mx ex _ => ff sx mx ex
+ end =
+ match x with
+ | F754_zero sx => fz sx
+ | F754_infinity sx => fi sx
+ | F754_nan => fn
+ | F754_finite sx mx ex => ff sx mx ex
+ end.
+Proof.
+now intros T fz fi fn ff [sx|sx| |sx mx ex] Hx.
+Qed.
+
+Theorem canonic_canonic_mantissa :
+ forall (sx : bool) mx ex,
+ canonic_mantissa mx ex = true ->
+ canonic radix2 fexp (Float radix2 (cond_Zopp sx (Zpos mx)) ex).
+Proof.
+intros sx mx ex H.
+assert (Hx := Zeq_bool_eq _ _ H). clear H.
+apply sym_eq.
+simpl.
+pattern ex at 2 ; rewrite <- Hx.
+apply (f_equal fexp).
+rewrite ln_beta_F2R_Zdigits.
+rewrite <- Zdigits_abs.
+rewrite Z_of_nat_S_digits2_Pnat.
+now case sx.
+now case sx.
+Qed.
+
+Theorem generic_format_B2R :
+ forall x,
+ generic_format radix2 fexp (B2R x).
+Proof.
+intros [sx|sx| |sx mx ex Hx] ; try apply generic_format_0.
+simpl.
+apply generic_format_canonic.
+apply canonic_canonic_mantissa.
+now destruct (andb_prop _ _ Hx) as (H, _).
+Qed.
+
+Theorem B2FF_inj :
+ forall x y : binary_float,
+ B2FF x = B2FF y ->
+ x = y.
+Proof.
+intros [sx|sx| |sx mx ex Hx] [sy|sy| |sy my ey Hy] ; try easy.
+(* *)
+intros H.
+now inversion H.
+(* *)
+intros H.
+now inversion H.
+(* *)
+intros H.
+inversion H.
+clear H.
+revert Hx.
+rewrite H2, H3.
+intros Hx.
+apply f_equal.
+apply eqbool_irrelevance.
+Qed.
+
+Definition is_finite_strict f :=
+ match f with
+ | B754_finite _ _ _ _ => true
+ | _ => false
+ end.
+
+Theorem B2R_inj:
+ forall x y : binary_float,
+ is_finite_strict x = true ->
+ is_finite_strict y = true ->
+ B2R x = B2R y ->
+ x = y.
+Proof.
+intros [sx|sx| |sx mx ex Hx] [sy|sy| |sy my ey Hy] ; try easy.
+simpl.
+intros _ _ Heq.
+assert (Hs: sx = sy).
+(* *)
+revert Heq. clear.
+case sx ; case sy ; try easy ;
+ intros Heq ; apply False_ind ; revert Heq.
+apply Rlt_not_eq.
+apply Rlt_trans with R0.
+now apply F2R_lt_0_compat.
+now apply F2R_gt_0_compat.
+apply Rgt_not_eq.
+apply Rgt_trans with R0.
+now apply F2R_gt_0_compat.
+now apply F2R_lt_0_compat.
+assert (mx = my /\ ex = ey).
+(* *)
+refine (_ (canonic_unicity _ fexp _ _ _ _ Heq)).
+rewrite Hs.
+now case sy ; intro H ; injection H ; split.
+apply canonic_canonic_mantissa.
+exact (proj1 (andb_prop _ _ Hx)).
+apply canonic_canonic_mantissa.
+exact (proj1 (andb_prop _ _ Hy)).
+(* *)
+revert Hx.
+rewrite Hs, (proj1 H), (proj2 H).
+intros Hx.
+apply f_equal.
+apply eqbool_irrelevance.
+Qed.
+
+Definition is_finite f :=
+ match f with
+ | B754_finite _ _ _ _ => true
+ | B754_zero _ => true
+ | _ => false
+ end.
+
+Definition is_finite_FF f :=
+ match f with
+ | F754_finite _ _ _ => true
+ | F754_zero _ => true
+ | _ => false
+ end.
+
+Theorem is_finite_FF2B :
+ forall x Hx,
+ is_finite (FF2B x Hx) = is_finite_FF x.
+Proof.
+now intros [| | |].
+Qed.
+
+Theorem is_finite_FF_B2FF :
+ forall x,
+ is_finite_FF (B2FF x) = is_finite x.
+Proof.
+now intros [| | |].
+Qed.
+
+Definition Bopp x :=
+ match x with
+ | B754_nan => x
+ | B754_infinity sx => B754_infinity (negb sx)
+ | B754_finite sx mx ex Hx => B754_finite (negb sx) mx ex Hx
+ | B754_zero sx => B754_zero (negb sx)
+ end.
+
+Theorem Bopp_involutive :
+ forall x, Bopp (Bopp x) = x.
+Proof.
+now intros [sx|sx| |sx mx ex Hx] ; simpl ; try rewrite Bool.negb_involutive.
+Qed.
+
+Theorem B2R_Bopp :
+ forall x,
+ B2R (Bopp x) = (- B2R x)%R.
+Proof.
+intros [sx|sx| |sx mx ex Hx] ; apply sym_eq ; try apply Ropp_0.
+simpl.
+rewrite <- F2R_opp.
+now case sx.
+Qed.
+
+
+Theorem is_finite_Bopp: forall x,
+ is_finite (Bopp x) = is_finite x.
+Proof.
+now intros [| | |].
+Qed.
+
+
+
+Theorem bounded_lt_emax :
+ forall mx ex,
+ bounded mx ex = true ->
+ (F2R (Float radix2 (Zpos mx) ex) < bpow radix2 emax)%R.
+Proof.
+intros mx ex Hx.
+destruct (andb_prop _ _ Hx) as (H1,H2).
+generalize (Zeq_bool_eq _ _ H1). clear H1. intro H1.
+generalize (Zle_bool_imp_le _ _ H2). clear H2. intro H2.
+generalize (ln_beta_F2R_Zdigits radix2 (Zpos mx) ex).
+destruct (ln_beta radix2 (F2R (Float radix2 (Zpos mx) ex))) as (e',Ex).
+unfold ln_beta_val.
+intros H.
+apply Rlt_le_trans with (bpow radix2 e').
+change (Zpos mx) with (Zabs (Zpos mx)).
+rewrite F2R_Zabs.
+apply Ex.
+apply Rgt_not_eq.
+now apply F2R_gt_0_compat.
+apply bpow_le.
+rewrite H. 2: discriminate.
+revert H1. clear -H2.
+rewrite Z_of_nat_S_digits2_Pnat.
+change Fcalc_digits.radix2 with radix2.
+unfold fexp, FLT_exp.
+generalize (Zdigits radix2 (Zpos mx)).
+intros ; zify ; subst.
+clear -H H2. clearbody emin.
+omega.
+Qed.
+
+Theorem abs_B2R_lt_emax :
+ forall x,
+ (Rabs (B2R x) < bpow radix2 emax)%R.
+Proof.
+intros [sx|sx| |sx mx ex Hx] ; simpl ; try ( rewrite Rabs_R0 ; apply bpow_gt_0 ).
+rewrite <- F2R_Zabs, abs_cond_Zopp.
+now apply bounded_lt_emax.
+Qed.
+
+Theorem bounded_canonic_lt_emax :
+ forall mx ex,
+ canonic radix2 fexp (Float radix2 (Zpos mx) ex) ->
+ (F2R (Float radix2 (Zpos mx) ex) < bpow radix2 emax)%R ->
+ bounded mx ex = true.
+Proof.
+intros mx ex Cx Bx.
+apply andb_true_intro.
+split.
+unfold canonic_mantissa.
+unfold canonic, Fexp in Cx.
+rewrite Cx at 2.
+rewrite Z_of_nat_S_digits2_Pnat.
+change Fcalc_digits.radix2 with radix2.
+unfold canonic_exp.
+rewrite ln_beta_F2R_Zdigits. 2: discriminate.
+now apply -> Zeq_is_eq_bool.
+apply Zle_bool_true.
+unfold canonic, Fexp in Cx.
+rewrite Cx.
+unfold canonic_exp, fexp, FLT_exp.
+destruct (ln_beta radix2 (F2R (Float radix2 (Zpos mx) ex))) as (e',Ex). simpl.
+apply Zmax_lub.
+cut (e' - 1 < emax)%Z. clear ; omega.
+apply lt_bpow with radix2.
+apply Rle_lt_trans with (2 := Bx).
+change (Zpos mx) with (Zabs (Zpos mx)).
+rewrite F2R_Zabs.
+apply Ex.
+apply Rgt_not_eq.
+now apply F2R_gt_0_compat.
+unfold emin.
+generalize (prec_gt_0 prec).
+clear -Hmax ; omega.
+Qed.
+
+(** mantissa, round and sticky bits *)
+Record shr_record := { shr_m : Z ; shr_r : bool ; shr_s : bool }.
+
+Definition shr_1 mrs :=
+ let '(Build_shr_record m r s) := mrs in
+ let s := orb r s in
+ match m with
+ | Z0 => Build_shr_record Z0 false s
+ | Zpos xH => Build_shr_record Z0 true s
+ | Zpos (xO p) => Build_shr_record (Zpos p) false s
+ | Zpos (xI p) => Build_shr_record (Zpos p) true s
+ | Zneg xH => Build_shr_record Z0 true s
+ | Zneg (xO p) => Build_shr_record (Zneg p) false s
+ | Zneg (xI p) => Build_shr_record (Zneg p) true s
+ end.
+
+Definition loc_of_shr_record mrs :=
+ match mrs with
+ | Build_shr_record _ false false => loc_Exact
+ | Build_shr_record _ false true => loc_Inexact Lt
+ | Build_shr_record _ true false => loc_Inexact Eq
+ | Build_shr_record _ true true => loc_Inexact Gt
+ end.
+
+Definition shr_record_of_loc m l :=
+ match l with
+ | loc_Exact => Build_shr_record m false false
+ | loc_Inexact Lt => Build_shr_record m false true
+ | loc_Inexact Eq => Build_shr_record m true false
+ | loc_Inexact Gt => Build_shr_record m true true
+ end.
+
+Theorem shr_m_shr_record_of_loc :
+ forall m l,
+ shr_m (shr_record_of_loc m l) = m.
+Proof.
+now intros m [|[| |]].
+Qed.
+
+Theorem loc_of_shr_record_of_loc :
+ forall m l,
+ loc_of_shr_record (shr_record_of_loc m l) = l.
+Proof.
+now intros m [|[| |]].
+Qed.
+
+Definition shr mrs e n :=
+ match n with
+ | Zpos p => (iter_pos p _ shr_1 mrs, (e + n)%Z)
+ | _ => (mrs, e)
+ end.
+
+Lemma inbetween_shr_1 :
+ forall x mrs e,
+ (0 <= shr_m mrs)%Z ->
+ inbetween_float radix2 (shr_m mrs) e x (loc_of_shr_record mrs) ->
+ inbetween_float radix2 (shr_m (shr_1 mrs)) (e + 1) x (loc_of_shr_record (shr_1 mrs)).
+Proof.
+intros x mrs e Hm Hl.
+refine (_ (new_location_even_correct (F2R (Float radix2 (shr_m (shr_1 mrs)) (e + 1))) (bpow radix2 e) 2 _ _ _ x (if shr_r (shr_1 mrs) then 1 else 0) (loc_of_shr_record mrs) _ _)) ; try easy.
+2: apply bpow_gt_0.
+2: now case (shr_r (shr_1 mrs)) ; split.
+change (Z2R 2) with (bpow radix2 1).
+rewrite <- bpow_plus.
+rewrite (Zplus_comm 1), <- (F2R_bpow radix2 (e + 1)).
+unfold inbetween_float, F2R. simpl.
+rewrite Z2R_plus, Rmult_plus_distr_r.
+replace (new_location_even 2 (if shr_r (shr_1 mrs) then 1%Z else 0%Z) (loc_of_shr_record mrs)) with (loc_of_shr_record (shr_1 mrs)).
+easy.
+clear -Hm.
+destruct mrs as (m, r, s).
+now destruct m as [|[m|m|]|m] ; try (now elim Hm) ; destruct r as [|] ; destruct s as [|].
+rewrite (F2R_change_exp radix2 e).
+2: apply Zle_succ.
+unfold F2R. simpl.
+rewrite <- 2!Rmult_plus_distr_r, <- 2!Z2R_plus.
+rewrite Zplus_assoc.
+replace (shr_m (shr_1 mrs) * 2 ^ (e + 1 - e) + (if shr_r (shr_1 mrs) then 1%Z else 0%Z))%Z with (shr_m mrs).
+exact Hl.
+ring_simplify (e + 1 - e)%Z.
+change (2^1)%Z with 2%Z.
+rewrite Zmult_comm.
+clear -Hm.
+destruct mrs as (m, r, s).
+now destruct m as [|[m|m|]|m] ; try (now elim Hm) ; destruct r as [|] ; destruct s as [|].
+Qed.
+
+Theorem inbetween_shr :
+ forall x m e l n,
+ (0 <= m)%Z ->
+ inbetween_float radix2 m e x l ->
+ let '(mrs, e') := shr (shr_record_of_loc m l) e n in
+ inbetween_float radix2 (shr_m mrs) e' x (loc_of_shr_record mrs).
+Proof.
+intros x m e l n Hm Hl.
+destruct n as [|n|n].
+now destruct l as [|[| |]].
+2: now destruct l as [|[| |]].
+unfold shr.
+rewrite iter_nat_of_P.
+rewrite Zpos_eq_Z_of_nat_o_nat_of_P.
+induction (nat_of_P n).
+simpl.
+rewrite Zplus_0_r.
+now destruct l as [|[| |]].
+simpl iter_nat.
+rewrite inj_S.
+unfold Zsucc.
+rewrite Zplus_assoc.
+revert IHn0.
+apply inbetween_shr_1.
+clear -Hm.
+induction n0.
+now destruct l as [|[| |]].
+simpl.
+revert IHn0.
+generalize (iter_nat n0 shr_record shr_1 (shr_record_of_loc m l)).
+clear.
+intros (m, r, s) Hm.
+now destruct m as [|[m|m|]|m] ; try (now elim Hm) ; destruct r as [|] ; destruct s as [|].
+Qed.
+
+Definition Zdigits2 m :=
+ match m with Z0 => m | Zpos p => Z_of_nat (S (digits2_Pnat p)) | Zneg p => Z_of_nat (S (digits2_Pnat p)) end.
+
+Theorem Zdigits2_Zdigits :
+ forall m,
+ Zdigits2 m = Zdigits radix2 m.
+Proof.
+unfold Zdigits2.
+intros [|m|m] ; try apply Z_of_nat_S_digits2_Pnat.
+easy.
+Qed.
+
+Definition shr_fexp m e l :=
+ shr (shr_record_of_loc m l) e (fexp (Zdigits2 m + e) - e).
+
+Theorem shr_truncate :
+ forall m e l,
+ (0 <= m)%Z ->
+ shr_fexp m e l =
+ let '(m', e', l') := truncate radix2 fexp (m, e, l) in (shr_record_of_loc m' l', e').
+Proof.
+intros m e l Hm.
+case_eq (truncate radix2 fexp (m, e, l)).
+intros (m', e') l'.
+unfold shr_fexp.
+rewrite Zdigits2_Zdigits.
+case_eq (fexp (Zdigits radix2 m + e) - e)%Z.
+(* *)
+intros He.
+unfold truncate.
+rewrite He.
+simpl.
+intros H.
+now inversion H.
+(* *)
+intros p Hp.
+assert (He: (e <= fexp (Zdigits radix2 m + e))%Z).
+clear -Hp ; zify ; omega.
+destruct (inbetween_float_ex radix2 m e l) as (x, Hx).
+generalize (inbetween_shr x m e l (fexp (Zdigits radix2 m + e) - e) Hm Hx).
+assert (Hx0 : (0 <= x)%R).
+apply Rle_trans with (F2R (Float radix2 m e)).
+now apply F2R_ge_0_compat.
+exact (proj1 (inbetween_float_bounds _ _ _ _ _ Hx)).
+case_eq (shr (shr_record_of_loc m l) e (fexp (Zdigits radix2 m + e) - e)).
+intros mrs e'' H3 H4 H1.
+generalize (truncate_correct radix2 _ x m e l Hx0 Hx (or_introl _ He)).
+rewrite H1.
+intros (H2,_).
+rewrite <- Hp, H3.
+assert (e'' = e').
+change (snd (mrs, e'') = snd (fst (m',e',l'))).
+rewrite <- H1, <- H3.
+unfold truncate.
+now rewrite Hp.
+rewrite H in H4 |- *.
+apply (f_equal (fun v => (v, _))).
+destruct (inbetween_float_unique _ _ _ _ _ _ _ H2 H4) as (H5, H6).
+rewrite H5, H6.
+case mrs.
+now intros m0 [|] [|].
+(* *)
+intros p Hp.
+unfold truncate.
+rewrite Hp.
+simpl.
+intros H.
+now inversion H.
+Qed.
+
+Inductive mode := mode_NE | mode_ZR | mode_DN | mode_UP | mode_NA.
+
+Definition round_mode m :=
+ match m with
+ | mode_NE => ZnearestE
+ | mode_ZR => Ztrunc
+ | mode_DN => Zfloor
+ | mode_UP => Zceil
+ | mode_NA => ZnearestA
+ end.
+
+Definition choice_mode m sx mx lx :=
+ match m with
+ | mode_NE => cond_incr (round_N (negb (Zeven mx)) lx) mx
+ | mode_ZR => mx
+ | mode_DN => cond_incr (round_sign_DN sx lx) mx
+ | mode_UP => cond_incr (round_sign_UP sx lx) mx
+ | mode_NA => cond_incr (round_N true lx) mx
+ end.
+
+Global Instance valid_rnd_round_mode : forall m, Valid_rnd (round_mode m).
+Proof.
+destruct m ; unfold round_mode ; auto with typeclass_instances.
+Qed.
+
+Definition overflow_to_inf m s :=
+ match m with
+ | mode_NE => true
+ | mode_NA => true
+ | mode_ZR => false
+ | mode_UP => negb s
+ | mode_DN => s
+ end.
+
+Definition binary_overflow m s :=
+ if overflow_to_inf m s then F754_infinity s
+ else F754_finite s (match (Zpower 2 prec - 1)%Z with Zpos p => p | _ => xH end) (emax - prec).
+
+Definition binary_round_aux mode sx mx ex lx :=
+ let '(mrs', e') := shr_fexp (Zpos mx) ex lx in
+ let '(mrs'', e'') := shr_fexp (choice_mode mode sx (shr_m mrs') (loc_of_shr_record mrs')) e' loc_Exact in
+ match shr_m mrs'' with
+ | Z0 => F754_zero sx
+ | Zpos m => if Zle_bool e'' (emax - prec) then F754_finite sx m e'' else binary_overflow mode sx
+ | _ => F754_nan (* dummy *)
+ end.
+
+Theorem binary_round_aux_correct :
+ forall mode x mx ex lx,
+ inbetween_float radix2 (Zpos mx) ex (Rabs x) lx ->
+ (ex <= fexp (Zdigits radix2 (Zpos mx) + ex))%Z ->
+ let z := binary_round_aux mode (Rlt_bool x 0) mx ex lx in
+ valid_binary z = true /\
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode mode) x)) (bpow radix2 emax) then
+ FF2R radix2 z = round radix2 fexp (round_mode mode) x /\
+ is_finite_FF z = true
+ else
+ z = binary_overflow mode (Rlt_bool x 0).
+Proof with auto with typeclass_instances.
+intros m x mx ex lx Bx Ex z.
+unfold binary_round_aux in z.
+revert z.
+rewrite shr_truncate. 2: easy.
+refine (_ (round_trunc_sign_any_correct _ _ (round_mode m) (choice_mode m) _ x (Zpos mx) ex lx Bx (or_introl _ Ex))).
+refine (_ (truncate_correct_partial _ _ _ _ _ _ _ Bx Ex)).
+destruct (truncate radix2 fexp (Zpos mx, ex, lx)) as ((m1, e1), l1).
+rewrite loc_of_shr_record_of_loc, shr_m_shr_record_of_loc.
+set (m1' := choice_mode m (Rlt_bool x 0) m1 l1).
+intros (H1a,H1b) H1c.
+rewrite H1c.
+assert (Hm: (m1 <= m1')%Z).
+(* . *)
+unfold m1', choice_mode, cond_incr.
+case m ;
+ try apply Zle_refl ;
+ match goal with |- (m1 <= if ?b then _ else _)%Z =>
+ case b ; [ apply Zle_succ | apply Zle_refl ] end.
+assert (Hr: Rabs (round radix2 fexp (round_mode m) x) = F2R (Float radix2 m1' e1)).
+(* . *)
+rewrite <- (Zabs_eq m1').
+replace (Zabs m1') with (Zabs (cond_Zopp (Rlt_bool x 0) m1')).
+rewrite F2R_Zabs.
+now apply f_equal.
+apply abs_cond_Zopp.
+apply Zle_trans with (2 := Hm).
+apply Zlt_succ_le.
+apply F2R_gt_0_reg with radix2 e1.
+apply Rle_lt_trans with (1 := Rabs_pos x).
+exact (proj2 (inbetween_float_bounds _ _ _ _ _ H1a)).
+(* . *)
+assert (Br: inbetween_float radix2 m1' e1 (Rabs (round radix2 fexp (round_mode m) x)) loc_Exact).
+now apply inbetween_Exact.
+destruct m1' as [|m1'|m1'].
+(* . m1' = 0 *)
+rewrite shr_truncate. 2: apply Zle_refl.
+generalize (truncate_0 radix2 fexp e1 loc_Exact).
+destruct (truncate radix2 fexp (Z0, e1, loc_Exact)) as ((m2, e2), l2).
+rewrite shr_m_shr_record_of_loc.
+intros Hm2.
+rewrite Hm2.
+intros z.
+repeat split.
+rewrite Rlt_bool_true.
+repeat split.
+apply sym_eq.
+case Rlt_bool ; apply F2R_0.
+rewrite <- F2R_Zabs, abs_cond_Zopp, F2R_0.
+apply bpow_gt_0.
+(* . 0 < m1' *)
+assert (He: (e1 <= fexp (Zdigits radix2 (Zpos m1') + e1))%Z).
+rewrite <- ln_beta_F2R_Zdigits, <- Hr, ln_beta_abs.
+2: discriminate.
+rewrite H1b.
+rewrite canonic_exp_abs.
+fold (canonic_exp radix2 fexp (round radix2 fexp (round_mode m) x)).
+apply canonic_exp_round_ge...
+rewrite H1c.
+case (Rlt_bool x 0).
+apply Rlt_not_eq.
+now apply F2R_lt_0_compat.
+apply Rgt_not_eq.
+now apply F2R_gt_0_compat.
+refine (_ (truncate_correct_partial _ _ _ _ _ _ _ Br He)).
+2: now rewrite Hr ; apply F2R_gt_0_compat.
+refine (_ (truncate_correct_format radix2 fexp (Zpos m1') e1 _ _ He)).
+2: discriminate.
+rewrite shr_truncate. 2: easy.
+destruct (truncate radix2 fexp (Zpos m1', e1, loc_Exact)) as ((m2, e2), l2).
+rewrite shr_m_shr_record_of_loc.
+intros (H3,H4) (H2,_).
+destruct m2 as [|m2|m2].
+elim Rgt_not_eq with (2 := H3).
+rewrite F2R_0.
+now apply F2R_gt_0_compat.
+rewrite F2R_cond_Zopp, H3, abs_cond_Ropp, <- F2R_abs.
+simpl Zabs.
+case_eq (Zle_bool e2 (emax - prec)) ; intros He2.
+assert (bounded m2 e2 = true).
+apply andb_true_intro.
+split.
+unfold canonic_mantissa.
+apply Zeq_bool_true.
+rewrite Z_of_nat_S_digits2_Pnat.
+rewrite <- ln_beta_F2R_Zdigits.
+apply sym_eq.
+now rewrite H3 in H4.
+discriminate.
+exact He2.
+apply (conj H).
+rewrite Rlt_bool_true.
+repeat split.
+apply F2R_cond_Zopp.
+now apply bounded_lt_emax.
+rewrite (Rlt_bool_false _ (bpow radix2 emax)).
+refine (conj _ (refl_equal _)).
+unfold binary_overflow.
+case overflow_to_inf.
+apply refl_equal.
+unfold valid_binary, bounded.
+rewrite Zle_bool_refl.
+rewrite Bool.andb_true_r.
+apply Zeq_bool_true.
+rewrite Z_of_nat_S_digits2_Pnat.
+change Fcalc_digits.radix2 with radix2.
+replace (Zdigits radix2 (Zpos (match (Zpower 2 prec - 1)%Z with Zpos p => p | _ => xH end))) with prec.
+unfold fexp, FLT_exp, emin.
+generalize (prec_gt_0 prec).
+clear -Hmax ; zify ; omega.
+change 2%Z with (radix_val radix2).
+case_eq (Zpower radix2 prec - 1)%Z.
+simpl Zdigits.
+generalize (Zpower_gt_1 radix2 prec (prec_gt_0 prec)).
+clear ; omega.
+intros p Hp.
+apply Zle_antisym.
+cut (prec - 1 < Zdigits radix2 (Zpos p))%Z. clear ; omega.
+apply Zdigits_gt_Zpower.
+simpl Zabs. rewrite <- Hp.
+cut (Zpower radix2 (prec - 1) < Zpower radix2 prec)%Z. clear ; omega.
+apply lt_Z2R.
+rewrite 2!Z2R_Zpower. 2: now apply Zlt_le_weak.
+apply bpow_lt.
+apply Zlt_pred.
+now apply Zlt_0_le_0_pred.
+apply Zdigits_le_Zpower.
+simpl Zabs. rewrite <- Hp.
+apply Zlt_pred.
+intros p Hp.
+generalize (Zpower_gt_1 radix2 _ (prec_gt_0 prec)).
+clear -Hp ; zify ; omega.
+apply Rnot_lt_le.
+intros Hx.
+generalize (refl_equal (bounded m2 e2)).
+unfold bounded at 2.
+rewrite He2.
+rewrite Bool.andb_false_r.
+rewrite bounded_canonic_lt_emax with (2 := Hx).
+discriminate.
+unfold canonic.
+now rewrite <- H3.
+elim Rgt_not_eq with (2 := H3).
+apply Rlt_trans with R0.
+now apply F2R_lt_0_compat.
+now apply F2R_gt_0_compat.
+rewrite <- Hr.
+apply generic_format_abs...
+apply generic_format_round...
+(* . not m1' < 0 *)
+elim Rgt_not_eq with (2 := Hr).
+apply Rlt_le_trans with R0.
+now apply F2R_lt_0_compat.
+apply Rabs_pos.
+(* *)
+apply Rlt_le_trans with (2 := proj1 (inbetween_float_bounds _ _ _ _ _ Bx)).
+now apply F2R_gt_0_compat.
+(* all the modes are valid *)
+clear. case m.
+exact inbetween_int_NE_sign.
+exact inbetween_int_ZR_sign.
+exact inbetween_int_DN_sign.
+exact inbetween_int_UP_sign.
+exact inbetween_int_NA_sign.
+Qed.
+
+Definition Bsign x :=
+ match x with
+ | B754_nan => false
+ | B754_zero s => s
+ | B754_infinity s => s
+ | B754_finite s _ _ _ => s
+ end.
+
+Definition sign_FF x :=
+ match x with
+ | F754_nan => false
+ | F754_zero s => s
+ | F754_infinity s => s
+ | F754_finite s _ _ => s
+ end.
+
+Theorem Bsign_FF2B :
+ forall x H,
+ Bsign (FF2B x H) = sign_FF x.
+Proof.
+now intros [sx|sx| |sx mx ex] H.
+Qed.
+
+(** Multiplication *)
+
+Lemma Bmult_correct_aux :
+ forall m sx mx ex (Hx : bounded mx ex = true) sy my ey (Hy : bounded my ey = true),
+ let x := F2R (Float radix2 (cond_Zopp sx (Zpos mx)) ex) in
+ let y := F2R (Float radix2 (cond_Zopp sy (Zpos my)) ey) in
+ let z := binary_round_aux m (xorb sx sy) (mx * my) (ex + ey) loc_Exact in
+ valid_binary z = true /\
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (x * y))) (bpow radix2 emax) then
+ FF2R radix2 z = round radix2 fexp (round_mode m) (x * y) /\
+ is_finite_FF z = true
+ else
+ z = binary_overflow m (xorb sx sy).
+Proof.
+intros m sx mx ex Hx sy my ey Hy x y.
+unfold x, y.
+rewrite <- F2R_mult.
+simpl.
+replace (xorb sx sy) with (Rlt_bool (F2R (Float radix2 (cond_Zopp sx (Zpos mx) * cond_Zopp sy (Zpos my)) (ex + ey))) 0).
+apply binary_round_aux_correct.
+constructor.
+rewrite <- F2R_abs.
+apply F2R_eq_compat.
+rewrite Zabs_Zmult.
+now rewrite 2!abs_cond_Zopp.
+(* *)
+change (Zpos (mx * my)) with (Zpos mx * Zpos my)%Z.
+assert (forall m e, bounded m e = true -> fexp (Zdigits radix2 (Zpos m) + e) = e)%Z.
+clear. intros m e Hb.
+destruct (andb_prop _ _ Hb) as (H,_).
+apply Zeq_bool_eq.
+now rewrite <- Z_of_nat_S_digits2_Pnat.
+generalize (H _ _ Hx) (H _ _ Hy).
+clear x y sx sy Hx Hy H.
+unfold fexp, FLT_exp.
+refine (_ (Zdigits_mult_ge radix2 (Zpos mx) (Zpos my) _ _)) ; try discriminate.
+refine (_ (Zdigits_gt_0 radix2 (Zpos mx) _) (Zdigits_gt_0 radix2 (Zpos my) _)) ; try discriminate.
+generalize (Zdigits radix2 (Zpos mx)) (Zdigits radix2 (Zpos my)) (Zdigits radix2 (Zpos mx * Zpos my)).
+clear -Hmax.
+unfold emin.
+intros dx dy dxy Hx Hy Hxy.
+zify ; intros ; subst.
+omega.
+(* *)
+case sx ; case sy.
+apply Rlt_bool_false.
+now apply F2R_ge_0_compat.
+apply Rlt_bool_true.
+now apply F2R_lt_0_compat.
+apply Rlt_bool_true.
+now apply F2R_lt_0_compat.
+apply Rlt_bool_false.
+now apply F2R_ge_0_compat.
+Qed.
+
+Definition Bmult m x y :=
+ match x, y with
+ | B754_nan, _ => x
+ | _, B754_nan => y
+ | B754_infinity sx, B754_infinity sy => B754_infinity (xorb sx sy)
+ | B754_infinity sx, B754_finite sy _ _ _ => B754_infinity (xorb sx sy)
+ | B754_finite sx _ _ _, B754_infinity sy => B754_infinity (xorb sx sy)
+ | B754_infinity _, B754_zero _ => B754_nan
+ | B754_zero _, B754_infinity _ => B754_nan
+ | B754_finite sx _ _ _, B754_zero sy => B754_zero (xorb sx sy)
+ | B754_zero sx, B754_finite sy _ _ _ => B754_zero (xorb sx sy)
+ | B754_zero sx, B754_zero sy => B754_zero (xorb sx sy)
+ | B754_finite sx mx ex Hx, B754_finite sy my ey Hy =>
+ FF2B _ (proj1 (Bmult_correct_aux m sx mx ex Hx sy my ey Hy))
+ end.
+
+Theorem Bmult_correct :
+ forall m x y,
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (B2R x * B2R y))) (bpow radix2 emax) then
+ B2R (Bmult m x y) = round radix2 fexp (round_mode m) (B2R x * B2R y) /\
+ is_finite (Bmult m x y) = andb (is_finite x) (is_finite y)
+ else
+ B2FF (Bmult m x y) = binary_overflow m (xorb (Bsign x) (Bsign y)).
+Proof.
+intros m [sx|sx| |sx mx ex Hx] [sy|sy| |sy my ey Hy] ;
+ try ( rewrite ?Rmult_0_r, ?Rmult_0_l, round_0, Rabs_R0, Rlt_bool_true ; [ split ; apply refl_equal | apply bpow_gt_0 | auto with typeclass_instances ] ).
+simpl.
+case Bmult_correct_aux.
+intros H1.
+case Rlt_bool.
+intros (H2, H3).
+split.
+now rewrite B2R_FF2B.
+now rewrite is_finite_FF2B.
+intros H2.
+now rewrite B2FF_FF2B.
+Qed.
+
+Definition Bmult_FF m x y :=
+ match x, y with
+ | F754_nan, _ => x
+ | _, F754_nan => y
+ | F754_infinity sx, F754_infinity sy => F754_infinity (xorb sx sy)
+ | F754_infinity sx, F754_finite sy _ _ => F754_infinity (xorb sx sy)
+ | F754_finite sx _ _, F754_infinity sy => F754_infinity (xorb sx sy)
+ | F754_infinity _, F754_zero _ => F754_nan
+ | F754_zero _, F754_infinity _ => F754_nan
+ | F754_finite sx _ _, F754_zero sy => F754_zero (xorb sx sy)
+ | F754_zero sx, F754_finite sy _ _ => F754_zero (xorb sx sy)
+ | F754_zero sx, F754_zero sy => F754_zero (xorb sx sy)
+ | F754_finite sx mx ex, F754_finite sy my ey =>
+ binary_round_aux m (xorb sx sy) (mx * my) (ex + ey) loc_Exact
+ end.
+
+Theorem B2FF_Bmult :
+ forall m x y,
+ B2FF (Bmult m x y) = Bmult_FF m (B2FF x) (B2FF y).
+Proof.
+intros m [sx|sx| |sx mx ex Hx] [sy|sy| |sy my ey Hy] ; try easy.
+apply B2FF_FF2B.
+Qed.
+
+Definition shl_align mx ex ex' :=
+ match (ex' - ex)%Z with
+ | Zneg d => (shift_pos d mx, ex')
+ | _ => (mx, ex)
+ end.
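+
+(* Illustrative example (not part of the development): [shl_align] only
+   rewrites a float toward a smaller exponent, e.g.
+   [shl_align 5 0 (-2) = (20%positive, (-2)%Z)] since 5*2^0 = 20*2^(-2),
+   while [shl_align 5 0 3 = (5%positive, 0%Z)] is left unchanged because
+   the requested exponent is larger than the current one. *)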
+
+Theorem shl_align_correct :
+ forall mx ex ex',
+ let (mx', ex'') := shl_align mx ex ex' in
+ F2R (Float radix2 (Zpos mx) ex) = F2R (Float radix2 (Zpos mx') ex'') /\
+ (ex'' <= ex')%Z.
+Proof.
+intros mx ex ex'.
+unfold shl_align.
+case_eq (ex' - ex)%Z.
+(* d = 0 *)
+intros H.
+repeat split.
+rewrite Zminus_eq with (1 := H).
+apply Zle_refl.
+(* d > 0 *)
+intros d Hd.
+repeat split.
+replace ex' with (ex' - ex + ex)%Z by ring.
+rewrite Hd.
+pattern ex at 1 ; rewrite <- Zplus_0_l.
+now apply Zplus_le_compat_r.
+(* d < 0 *)
+intros d Hd.
+rewrite shift_pos_correct, Zmult_comm.
+change (Zpower_pos 2 d) with (Zpower radix2 (Zpos d)).
+change (Zpos d) with (Zopp (Zneg d)).
+rewrite <- Hd.
+split.
+replace (- (ex' - ex))%Z with (ex - ex')%Z by ring.
+apply F2R_change_exp.
+apply Zle_0_minus_le.
+replace (ex - ex')%Z with (- (ex' - ex))%Z by ring.
+now rewrite Hd.
+apply Zle_refl.
+Qed.
+
+Theorem snd_shl_align :
+ forall mx ex ex',
+ (ex' <= ex)%Z ->
+ snd (shl_align mx ex ex') = ex'.
+Proof.
+intros mx ex ex' He.
+unfold shl_align.
+case_eq (ex' - ex)%Z ; simpl.
+intros H.
+now rewrite Zminus_eq with (1 := H).
+intros p.
+clear -He ; zify ; omega.
+intros.
+apply refl_equal.
+Qed.
+
+Definition shl_align_fexp mx ex :=
+ shl_align mx ex (fexp (Z_of_nat (S (digits2_Pnat mx)) + ex)).
+
+Theorem shl_align_fexp_correct :
+ forall mx ex,
+ let (mx', ex') := shl_align_fexp mx ex in
+ F2R (Float radix2 (Zpos mx) ex) = F2R (Float radix2 (Zpos mx') ex') /\
+ (ex' <= fexp (Zdigits radix2 (Zpos mx') + ex'))%Z.
+Proof.
+intros mx ex.
+unfold shl_align_fexp.
+generalize (shl_align_correct mx ex (fexp (Z_of_nat (S (digits2_Pnat mx)) + ex))).
+rewrite Z_of_nat_S_digits2_Pnat.
+case shl_align.
+intros mx' ex' (H1, H2).
+split.
+exact H1.
+rewrite <- ln_beta_F2R_Zdigits. 2: easy.
+rewrite <- H1.
+now rewrite ln_beta_F2R_Zdigits.
+Qed.
+
+Definition binary_round m sx mx ex :=
+ let '(mz, ez) := shl_align_fexp mx ex in binary_round_aux m sx mz ez loc_Exact.
+
+Theorem binary_round_correct :
+ forall m sx mx ex,
+ let z := binary_round m sx mx ex in
+ valid_binary z = true /\
+ let x := F2R (Float radix2 (cond_Zopp sx (Zpos mx)) ex) in
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) x)) (bpow radix2 emax) then
+ FF2R radix2 z = round radix2 fexp (round_mode m) x /\
+ is_finite_FF z = true
+ else
+ z = binary_overflow m sx.
+Proof.
+intros m sx mx ex.
+unfold binary_round.
+generalize (shl_align_fexp_correct mx ex).
+destruct (shl_align_fexp mx ex) as (mz, ez).
+intros (H1, H2).
+set (x := F2R (Float radix2 (cond_Zopp sx (Zpos mx)) ex)).
+replace sx with (Rlt_bool x 0).
+apply binary_round_aux_correct.
+constructor.
+unfold x.
+now rewrite <- F2R_Zabs, abs_cond_Zopp.
+exact H2.
+unfold x.
+case sx.
+apply Rlt_bool_true.
+now apply F2R_lt_0_compat.
+apply Rlt_bool_false.
+now apply F2R_ge_0_compat.
+Qed.
+
+Definition binary_normalize mode m e szero :=
+ match m with
+ | Z0 => B754_zero szero
+ | Zpos m => FF2B _ (proj1 (binary_round_correct mode false m e))
+ | Zneg m => FF2B _ (proj1 (binary_round_correct mode true m e))
+ end.
+
+Theorem binary_normalize_correct :
+ forall m mx ex szero,
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (F2R (Float radix2 mx ex)))) (bpow radix2 emax) then
+ B2R (binary_normalize m mx ex szero) = round radix2 fexp (round_mode m) (F2R (Float radix2 mx ex)) /\
+ is_finite (binary_normalize m mx ex szero) = true
+ else
+ B2FF (binary_normalize m mx ex szero) = binary_overflow m (Rlt_bool (F2R (Float radix2 mx ex)) 0).
+Proof with auto with typeclass_instances.
+intros m mx ez szero.
+destruct mx as [|mz|mz] ; simpl.
+rewrite F2R_0, round_0, Rabs_R0, Rlt_bool_true...
+apply bpow_gt_0.
+(* . mz > 0 *)
+generalize (binary_round_correct m false mz ez).
+simpl.
+case Rlt_bool_spec.
+intros _ (Vz, (Rz, Rz')).
+split.
+now rewrite B2R_FF2B.
+now rewrite is_finite_FF2B.
+intros Hz' (Vz, Rz).
+rewrite B2FF_FF2B, Rz.
+apply f_equal.
+apply sym_eq.
+apply Rlt_bool_false.
+now apply F2R_ge_0_compat.
+(* . mz < 0 *)
+generalize (binary_round_correct m true mz ez).
+simpl.
+case Rlt_bool_spec.
+intros _ (Vz, (Rz, Rz')).
+split.
+now rewrite B2R_FF2B.
+now rewrite is_finite_FF2B.
+intros Hz' (Vz, Rz).
+rewrite B2FF_FF2B, Rz.
+apply f_equal.
+apply sym_eq.
+apply Rlt_bool_true.
+now apply F2R_lt_0_compat.
+Qed.
+
+(** Addition *)
+Definition Bplus m x y :=
+ match x, y with
+ | B754_nan, _ => x
+ | _, B754_nan => y
+ | B754_infinity sx, B754_infinity sy =>
+ if Bool.eqb sx sy then x else B754_nan
+ | B754_infinity _, _ => x
+ | _, B754_infinity _ => y
+ | B754_zero sx, B754_zero sy =>
+ if Bool.eqb sx sy then x else
+ match m with mode_DN => B754_zero true | _ => B754_zero false end
+ | B754_zero _, _ => y
+ | _, B754_zero _ => x
+ | B754_finite sx mx ex Hx, B754_finite sy my ey Hy =>
+ let ez := Zmin ex ey in
+ binary_normalize m (Zplus (cond_Zopp sx (Zpos (fst (shl_align mx ex ez)))) (cond_Zopp sy (Zpos (fst (shl_align my ey ez)))))
+ ez (match m with mode_DN => true | _ => false end)
+ end.
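+
+(* Illustrative example (assuming both inputs are representable, e.g. prec >= 3):
+   adding 1.5 = 3*2^(-1) and 0.25 = 1*2^(-2) gives ez = -2,
+   shl_align 3 (-1) (-2) = (6, -2) and shl_align 1 (-2) (-2) = (1, -2),
+   so binary_normalize rounds the exact sum 7*2^(-2) = 1.75.
+   The zero sign passed to binary_normalize is true only for mode_DN,
+   so that x + (-x) is -0 when rounding toward minus infinity, as IEEE-754
+   prescribes. *)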
+
+Theorem Bplus_correct :
+ forall m x y,
+ is_finite x = true ->
+ is_finite y = true ->
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (B2R x + B2R y))) (bpow radix2 emax) then
+ B2R (Bplus m x y) = round radix2 fexp (round_mode m) (B2R x + B2R y) /\
+ is_finite (Bplus m x y) = true
+ else
+ (B2FF (Bplus m x y) = binary_overflow m (Bsign x) /\ Bsign x = Bsign y).
+Proof with auto with typeclass_instances.
+intros m [sx|sx| |sx mx ex Hx] [sy|sy| |sy my ey Hy] Fx Fy ; try easy.
+(* *)
+rewrite Rplus_0_r, round_0, Rabs_R0, Rlt_bool_true...
+simpl.
+case (Bool.eqb sx sy) ; try easy.
+now case m.
+apply bpow_gt_0.
+(* *)
+rewrite Rplus_0_l, round_generic, Rlt_bool_true...
+apply abs_B2R_lt_emax.
+apply generic_format_B2R.
+(* *)
+rewrite Rplus_0_r, round_generic, Rlt_bool_true...
+apply abs_B2R_lt_emax.
+apply generic_format_B2R.
+(* *)
+clear Fx Fy.
+simpl.
+set (szero := match m with mode_DN => true | _ => false end).
+set (ez := Zmin ex ey).
+set (mz := (cond_Zopp sx (Zpos (fst (shl_align mx ex ez))) + cond_Zopp sy (Zpos (fst (shl_align my ey ez))))%Z).
+assert (Hp: (F2R (Float radix2 (cond_Zopp sx (Zpos mx)) ex) +
+ F2R (Float radix2 (cond_Zopp sy (Zpos my)) ey))%R = F2R (Float radix2 mz ez)).
+rewrite 2!F2R_cond_Zopp.
+generalize (shl_align_correct mx ex ez).
+generalize (shl_align_correct my ey ez).
+generalize (snd_shl_align mx ex ez (Zle_min_l ex ey)).
+generalize (snd_shl_align my ey ez (Zle_min_r ex ey)).
+destruct (shl_align mx ex ez) as (mx', ex').
+destruct (shl_align my ey ez) as (my', ey').
+simpl.
+intros H1 H2.
+rewrite H1, H2.
+clear H1 H2.
+intros (H1, _) (H2, _).
+rewrite H1, H2.
+clear H1 H2.
+rewrite <- 2!F2R_cond_Zopp.
+unfold F2R. simpl.
+now rewrite <- Rmult_plus_distr_r, <- Z2R_plus.
+rewrite Hp.
+assert (Sz: (bpow radix2 emax <= Rabs (round radix2 fexp (round_mode m) (F2R (Float radix2 mz ez))))%R -> sx = Rlt_bool (F2R (Float radix2 mz ez)) 0 /\ sx = sy).
+(* . *)
+rewrite <- Hp.
+intros Bz.
+destruct (Bool.bool_dec sx sy) as [Hs|Hs].
+(* .. *)
+refine (conj _ Hs).
+rewrite Hs.
+apply sym_eq.
+case sy.
+apply Rlt_bool_true.
+rewrite <- (Rplus_0_r 0).
+apply Rplus_lt_compat.
+now apply F2R_lt_0_compat.
+now apply F2R_lt_0_compat.
+apply Rlt_bool_false.
+rewrite <- (Rplus_0_r 0).
+apply Rplus_le_compat.
+now apply F2R_ge_0_compat.
+now apply F2R_ge_0_compat.
+(* .. *)
+elim Rle_not_lt with (1 := Bz).
+generalize (bounded_lt_emax _ _ Hx) (bounded_lt_emax _ _ Hy) (andb_prop _ _ Hx) (andb_prop _ _ Hy).
+intros Bx By (Hx',_) (Hy',_).
+generalize (canonic_canonic_mantissa sx _ _ Hx') (canonic_canonic_mantissa sy _ _ Hy').
+clear -Bx By Hs.
+intros Cx Cy.
+destruct sx.
+(* ... *)
+destruct sy.
+now elim Hs.
+clear Hs.
+apply Rabs_lt.
+split.
+apply Rlt_le_trans with (F2R (Float radix2 (cond_Zopp true (Zpos mx)) ex)).
+rewrite F2R_Zopp.
+now apply Ropp_lt_contravar.
+apply round_ge_generic...
+now apply generic_format_canonic.
+pattern (F2R (Float radix2 (cond_Zopp true (Zpos mx)) ex)) at 1 ; rewrite <- Rplus_0_r.
+apply Rplus_le_compat_l.
+now apply F2R_ge_0_compat.
+apply Rle_lt_trans with (2 := By).
+apply round_le_generic...
+now apply generic_format_canonic.
+rewrite <- (Rplus_0_l (F2R (Float radix2 (Zpos my) ey))).
+apply Rplus_le_compat_r.
+now apply F2R_le_0_compat.
+(* ... *)
+destruct sy.
+2: now elim Hs.
+clear Hs.
+apply Rabs_lt.
+split.
+apply Rlt_le_trans with (F2R (Float radix2 (cond_Zopp true (Zpos my)) ey)).
+rewrite F2R_Zopp.
+now apply Ropp_lt_contravar.
+apply round_ge_generic...
+now apply generic_format_canonic.
+pattern (F2R (Float radix2 (cond_Zopp true (Zpos my)) ey)) at 1 ; rewrite <- Rplus_0_l.
+apply Rplus_le_compat_r.
+now apply F2R_ge_0_compat.
+apply Rle_lt_trans with (2 := Bx).
+apply round_le_generic...
+now apply generic_format_canonic.
+rewrite <- (Rplus_0_r (F2R (Float radix2 (Zpos mx) ex))).
+apply Rplus_le_compat_l.
+now apply F2R_le_0_compat.
+(* . *)
+generalize (binary_normalize_correct m mz ez szero).
+case Rlt_bool_spec.
+easy.
+intros Hz' Vz.
+specialize (Sz Hz').
+split.
+rewrite Vz.
+now apply f_equal.
+apply Sz.
+Qed.
+
+Definition Bminus m x y := Bplus m x (Bopp y).
+
+Theorem Bminus_correct :
+ forall m x y,
+ is_finite x = true ->
+ is_finite y = true ->
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (B2R x - B2R y))) (bpow radix2 emax) then
+ B2R (Bminus m x y) = round radix2 fexp (round_mode m) (B2R x - B2R y) /\
+ is_finite (Bminus m x y) = true
+ else
+ (B2FF (Bminus m x y) = binary_overflow m (Bsign x) /\ Bsign x = negb (Bsign y)).
+Proof with auto with typeclass_instances.
+intros m x y Fx Fy.
+replace (negb (Bsign y)) with (Bsign (Bopp y)).
+unfold Rminus.
+rewrite <- B2R_Bopp.
+apply Bplus_correct.
+exact Fx.
+now rewrite is_finite_Bopp.
+now destruct y as [ | | | ].
+Qed.
+
+(** Division *)
+Definition Fdiv_core_binary m1 e1 m2 e2 :=
+ let d1 := Zdigits2 m1 in
+ let d2 := Zdigits2 m2 in
+ let e := (e1 - e2)%Z in
+ let (m, e') :=
+ match (d2 + prec - d1)%Z with
+ | Zpos p => (m1 * Zpower_pos 2 p, e + Zneg p)%Z
+ | _ => (m1, e)
+ end in
+ let '(q, r) := Zdiv_eucl m m2 in
+ (q, e', new_location m2 r loc_Exact).
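+
+(* Worked example (illustration only, taking prec = 4): dividing 1*2^0 by 3*2^0
+   gives d1 = 1 and d2 = 2, hence a left shift of the numerator by
+   d2 + prec - d1 = 5: m = 32 and e' = -5.  Zdiv_eucl 32 3 = (10, 2), so the
+   result is (10, -5, new_location 3 2 loc_Exact) = (10, -5, loc_Inexact Gt):
+   1/3 lies strictly between 10*2^(-5) and 11*2^(-5), above their midpoint,
+   and the quotient 10 has the requested prec binary digits. *)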
+
+Lemma Bdiv_correct_aux :
+ forall m sx mx ex (Hx : bounded mx ex = true) sy my ey (Hy : bounded my ey = true),
+ let x := F2R (Float radix2 (cond_Zopp sx (Zpos mx)) ex) in
+ let y := F2R (Float radix2 (cond_Zopp sy (Zpos my)) ey) in
+ let z :=
+ let '(mz, ez, lz) := Fdiv_core_binary (Zpos mx) ex (Zpos my) ey in
+ match mz with
+ | Zpos mz => binary_round_aux m (xorb sx sy) mz ez lz
+ | _ => F754_nan (* dummy *)
+ end in
+ valid_binary z = true /\
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (x / y))) (bpow radix2 emax) then
+ FF2R radix2 z = round radix2 fexp (round_mode m) (x / y) /\
+ is_finite_FF z = true
+ else
+ z = binary_overflow m (xorb sx sy).
+Proof.
+intros m sx mx ex Hx sy my ey Hy.
+replace (Fdiv_core_binary (Zpos mx) ex (Zpos my) ey) with (Fdiv_core radix2 prec (Zpos mx) ex (Zpos my) ey).
+2: now unfold Fdiv_core_binary ; rewrite 2!Zdigits2_Zdigits.
+refine (_ (Fdiv_core_correct radix2 prec (Zpos mx) ex (Zpos my) ey _ _ _)) ; try easy.
+destruct (Fdiv_core radix2 prec (Zpos mx) ex (Zpos my) ey) as ((mz, ez), lz).
+intros (Pz, Bz).
+simpl.
+replace (xorb sx sy) with (Rlt_bool (F2R (Float radix2 (cond_Zopp sx (Zpos mx)) ex) *
+ / F2R (Float radix2 (cond_Zopp sy (Zpos my)) ey)) 0).
+unfold Rdiv.
+destruct mz as [|mz|mz].
+(* . mz = 0 *)
+elim (Zlt_irrefl prec).
+now apply Zle_lt_trans with Z0.
+(* . mz > 0 *)
+apply binary_round_aux_correct.
+rewrite Rabs_mult, Rabs_Rinv.
+now rewrite <- 2!F2R_Zabs, 2!abs_cond_Zopp.
+case sy.
+apply Rlt_not_eq.
+now apply F2R_lt_0_compat.
+apply Rgt_not_eq.
+now apply F2R_gt_0_compat.
+revert Pz.
+generalize (Zdigits radix2 (Zpos mz)).
+unfold fexp, FLT_exp.
+clear.
+intros ; zify ; subst.
+omega.
+(* . mz < 0 *)
+elim Rlt_not_le with (1 := proj2 (inbetween_float_bounds _ _ _ _ _ Bz)).
+apply Rle_trans with R0.
+apply F2R_le_0_compat.
+now case mz.
+apply Rmult_le_pos.
+now apply F2R_ge_0_compat.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply F2R_gt_0_compat.
+(* *)
+case sy ; simpl.
+change (Zneg my) with (Zopp (Zpos my)).
+rewrite F2R_Zopp.
+rewrite <- Ropp_inv_permute.
+rewrite Ropp_mult_distr_r_reverse.
+case sx ; simpl.
+apply Rlt_bool_false.
+rewrite <- Ropp_mult_distr_l_reverse.
+apply Rmult_le_pos.
+rewrite <- F2R_opp.
+now apply F2R_ge_0_compat.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply F2R_gt_0_compat.
+apply Rlt_bool_true.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+apply Rmult_lt_0_compat.
+now apply F2R_gt_0_compat.
+apply Rinv_0_lt_compat.
+now apply F2R_gt_0_compat.
+apply Rgt_not_eq.
+now apply F2R_gt_0_compat.
+case sx.
+apply Rlt_bool_true.
+rewrite F2R_Zopp.
+rewrite Ropp_mult_distr_l_reverse.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+apply Rmult_lt_0_compat.
+now apply F2R_gt_0_compat.
+apply Rinv_0_lt_compat.
+now apply F2R_gt_0_compat.
+apply Rlt_bool_false.
+apply Rmult_le_pos.
+now apply F2R_ge_0_compat.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply F2R_gt_0_compat.
+Qed.
+
+Definition Bdiv m x y :=
+ match x, y with
+ | B754_nan, _ => x
+ | _, B754_nan => y
+ | B754_infinity sx, B754_infinity sy => B754_nan
+ | B754_infinity sx, B754_finite sy _ _ _ => B754_infinity (xorb sx sy)
+ | B754_finite sx _ _ _, B754_infinity sy => B754_zero (xorb sx sy)
+ | B754_infinity sx, B754_zero sy => B754_infinity (xorb sx sy)
+ | B754_zero sx, B754_infinity sy => B754_zero (xorb sx sy)
+ | B754_finite sx _ _ _, B754_zero sy => B754_infinity (xorb sx sy)
+ | B754_zero sx, B754_finite sy _ _ _ => B754_zero (xorb sx sy)
+ | B754_zero sx, B754_zero sy => B754_nan
+ | B754_finite sx mx ex Hx, B754_finite sy my ey Hy =>
+ FF2B _ (proj1 (Bdiv_correct_aux m sx mx ex Hx sy my ey Hy))
+ end.
+
+Theorem Bdiv_correct :
+ forall m x y,
+ B2R y <> R0 ->
+ if Rlt_bool (Rabs (round radix2 fexp (round_mode m) (B2R x / B2R y))) (bpow radix2 emax) then
+ B2R (Bdiv m x y) = round radix2 fexp (round_mode m) (B2R x / B2R y) /\
+ is_finite (Bdiv m x y) = is_finite x
+ else
+ B2FF (Bdiv m x y) = binary_overflow m (xorb (Bsign x) (Bsign y)).
+Proof.
+intros m x [sy|sy| |sy my ey Hy] Zy ; try now elim Zy.
+revert x.
+unfold Rdiv.
+intros [sx|sx| |sx mx ex Hx] ;
+ try ( rewrite Rmult_0_l, round_0, Rabs_R0, Rlt_bool_true ; [ split ; apply refl_equal | apply bpow_gt_0 | auto with typeclass_instances ] ).
+simpl.
+case Bdiv_correct_aux.
+intros H1.
+unfold Rdiv.
+case Rlt_bool.
+intros (H2, H3).
+split.
+now rewrite B2R_FF2B.
+now rewrite is_finite_FF2B.
+intros H2.
+now rewrite B2FF_FF2B.
+Qed.
+
+(** Square root *)
+Definition Fsqrt_core_binary m e :=
+ let d := Zdigits2 m in
+ let s := Zmax (2 * prec - d) 0 in
+ let e' := (e - s)%Z in
+ let (s', e'') := if Zeven e' then (s, e') else (s + 1, e' - 1)%Z in
+ let m' :=
+ match s' with
+ | Zpos p => (m * Zpower_pos 2 p)%Z
+ | _ => m
+ end in
+ let (q, r) := Zsqrt m' in
+ let l :=
+ if Zeq_bool r 0 then loc_Exact
+ else loc_Inexact (if Zle_bool r q then Lt else Gt) in
+ (q, Zdiv2 e'', l).
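+
+(* Worked example (illustration only, taking prec = 4): for sqrt(2) = sqrt(1*2^1),
+   d = 1 and s = 2*prec - d = 7, so e' = -6 (already even) and m' = 2^7 = 128.
+   The integer square root of 128 is 11 with remainder 7, and 7 <= 11, so the
+   result is (11, -3, loc_Inexact Lt): sqrt(2) lies between 11*2^(-3) = 1.375
+   and 12*2^(-3) = 1.5, below their midpoint. *)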
+
+Lemma Bsqrt_correct_aux :
+ forall m mx ex (Hx : bounded mx ex = true),
+ let x := F2R (Float radix2 (Zpos mx) ex) in
+ let z :=
+ let '(mz, ez, lz) := Fsqrt_core_binary (Zpos mx) ex in
+ match mz with
+ | Zpos mz => binary_round_aux m false mz ez lz
+ | _ => F754_nan (* dummy *)
+ end in
+ valid_binary z = true /\
+ FF2R radix2 z = round radix2 fexp (round_mode m) (sqrt x) /\
+ is_finite_FF z = true.
+Proof with auto with typeclass_instances.
+intros m mx ex Hx.
+replace (Fsqrt_core_binary (Zpos mx) ex) with (Fsqrt_core radix2 prec (Zpos mx) ex).
+2: now unfold Fsqrt_core_binary ; rewrite Zdigits2_Zdigits.
+simpl.
+refine (_ (Fsqrt_core_correct radix2 prec (Zpos mx) ex _)) ; try easy.
+destruct (Fsqrt_core radix2 prec (Zpos mx) ex) as ((mz, ez), lz).
+intros (Pz, Bz).
+destruct mz as [|mz|mz].
+(* . mz = 0 *)
+elim (Zlt_irrefl prec).
+now apply Zle_lt_trans with Z0.
+(* . mz > 0 *)
+refine (_ (binary_round_aux_correct m (sqrt (F2R (Float radix2 (Zpos mx) ex))) mz ez lz _ _)).
+rewrite Rlt_bool_false. 2: apply sqrt_ge_0.
+rewrite Rlt_bool_true.
+easy.
+(* .. *)
+rewrite Rabs_pos_eq.
+refine (_ (relative_error_FLT_ex radix2 emin prec (prec_gt_0 prec) (round_mode m) (sqrt (F2R (Float radix2 (Zpos mx) ex))) _)).
+fold fexp.
+intros (eps, (Heps, Hr)).
+rewrite Hr.
+assert (Heps': (Rabs eps < 1)%R).
+apply Rlt_le_trans with (1 := Heps).
+fold (bpow radix2 0).
+apply bpow_le.
+generalize (prec_gt_0 prec).
+clear ; omega.
+apply Rsqr_incrst_0.
+3: apply bpow_ge_0.
+rewrite Rsqr_mult.
+rewrite Rsqr_sqrt.
+2: now apply F2R_ge_0_compat.
+unfold Rsqr.
+apply Rmult_ge_0_gt_0_lt_compat.
+apply Rle_ge.
+apply Rle_0_sqr.
+apply bpow_gt_0.
+now apply bounded_lt_emax.
+apply Rlt_le_trans with 4%R.
+apply Rsqr_incrst_1.
+apply Rplus_lt_compat_l.
+apply (Rabs_lt_inv _ _ Heps').
+rewrite <- (Rplus_opp_r 1).
+apply Rplus_le_compat_l.
+apply Rlt_le.
+apply (Rabs_lt_inv _ _ Heps').
+now apply (Z2R_le 0 2).
+change 4%R with (bpow radix2 2).
+apply bpow_le.
+generalize (prec_gt_0 prec).
+clear -Hmax ; omega.
+apply Rmult_le_pos.
+apply sqrt_ge_0.
+rewrite <- (Rplus_opp_r 1).
+apply Rplus_le_compat_l.
+apply Rlt_le.
+apply (Rabs_lt_inv _ _ Heps').
+rewrite Rabs_pos_eq.
+2: apply sqrt_ge_0.
+apply Rsqr_incr_0.
+2: apply bpow_ge_0.
+2: apply sqrt_ge_0.
+rewrite Rsqr_sqrt.
+2: now apply F2R_ge_0_compat.
+apply Rle_trans with (bpow radix2 emin).
+unfold Rsqr.
+rewrite <- bpow_plus.
+apply bpow_le.
+unfold emin.
+clear -Hmax ; omega.
+apply generic_format_ge_bpow with fexp.
+intros.
+apply Zle_max_r.
+now apply F2R_gt_0_compat.
+apply generic_format_canonic.
+apply (canonic_canonic_mantissa false).
+apply (andb_prop _ _ Hx).
+(* .. *)
+apply round_ge_generic...
+apply generic_format_0.
+apply sqrt_ge_0.
+rewrite Rabs_pos_eq.
+exact Bz.
+apply sqrt_ge_0.
+revert Pz.
+generalize (Zdigits radix2 (Zpos mz)).
+unfold fexp, FLT_exp.
+clear.
+intros ; zify ; subst.
+omega.
+(* . mz < 0 *)
+elim Rlt_not_le with (1 := proj2 (inbetween_float_bounds _ _ _ _ _ Bz)).
+apply Rle_trans with R0.
+apply F2R_le_0_compat.
+now case mz.
+apply sqrt_ge_0.
+Qed.
+
+Definition Bsqrt m x :=
+ match x with
+ | B754_nan => x
+ | B754_infinity false => x
+ | B754_infinity true => B754_nan
+ | B754_finite true _ _ _ => B754_nan
+ | B754_zero _ => x
+ | B754_finite sx mx ex Hx =>
+ FF2B _ (proj1 (Bsqrt_correct_aux m mx ex Hx))
+ end.
+
+Theorem Bsqrt_correct :
+ forall m x,
+ B2R (Bsqrt m x) = round radix2 fexp (round_mode m) (sqrt (B2R x)) /\
+ is_finite (Bsqrt m x) = match x with B754_zero _ => true | B754_finite false _ _ _ => true | _ => false end.
+Proof.
+intros m [sx|[|]| |sx mx ex Hx] ; try ( now simpl ; rewrite sqrt_0, round_0 ; auto with typeclass_instances ).
+simpl.
+case Bsqrt_correct_aux.
+intros H1 (H2, H3).
+case sx.
+refine (conj _ (refl_equal false)).
+apply sym_eq.
+unfold sqrt.
+case Rcase_abs.
+intros _.
+apply round_0.
+auto with typeclass_instances.
+intros H.
+elim Rge_not_lt with (1 := H).
+now apply F2R_lt_0_compat.
+split.
+now rewrite B2R_FF2B.
+now rewrite is_finite_FF2B.
+Qed.
+
+End Binary.
diff --git a/flocq/Appli/Fappli_IEEE_bits.v b/flocq/Appli/Fappli_IEEE_bits.v
new file mode 100644
index 0000000..06ed21e
--- /dev/null
+++ b/flocq/Appli/Fappli_IEEE_bits.v
@@ -0,0 +1,546 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * IEEE-754 encoding of binary floating-point data *)
+Require Import Fcore.
+Require Import Fcore_digits.
+Require Import Fcalc_digits.
+Require Import Fappli_IEEE.
+
+Section Binary_Bits.
+
+(** Number of bits for the fraction and exponent *)
+Variable mw ew : Z.
+Hypothesis Hmw : (0 < mw)%Z.
+Hypothesis Hew : (0 < ew)%Z.
+
+Let emax := Zpower 2 (ew - 1).
+Let prec := (mw + 1)%Z.
+Let emin := (3 - emax - prec)%Z.
+Let binary_float := binary_float prec emax.
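+
+(* For instance (illustration only), mw = 52 and ew = 11 (double precision)
+   give emax = 2^10 = 1024, prec = 53 and emin = -1074, matching the
+   [B64_Bits] instantiation below; mw = 23 and ew = 8 give emax = 128,
+   prec = 24 and emin = -149. *)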
+
+Let Hprec : (0 < prec)%Z.
+unfold prec.
+apply Zle_lt_succ.
+now apply Zlt_le_weak.
+Qed.
+
+Let Hm_gt_0 : (0 < 2^mw)%Z.
+apply (Zpower_gt_0 radix2).
+now apply Zlt_le_weak.
+Qed.
+
+Let He_gt_0 : (0 < 2^ew)%Z.
+apply (Zpower_gt_0 radix2).
+now apply Zlt_le_weak.
+Qed.
+
+Hypothesis Hmax : (prec < emax)%Z.
+
+Definition join_bits (s : bool) m e :=
+ (((if s then Zpower 2 ew else 0) + e) * Zpower 2 mw + m)%Z.
+
+Definition split_bits x :=
+ let mm := Zpower 2 mw in
+ let em := Zpower 2 ew in
+ (Zle_bool (mm * em) x, Zmod x mm, Zmod (Zdiv x mm) em)%Z.
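+
+(* Worked example (illustration only): with mw = 23 and ew = 8,
+   join_bits false 0 127 = 127 * 2^23 = 1065353216 (0x3F800000) and
+   join_bits true 0 127 = (2^8 + 127) * 2^23 = 3212836864 (0xBF800000);
+   split_bits recovers (false, 0, 127) and (true, 0, 127) respectively,
+   as stated by [split_join_bits] below. *)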
+
+Theorem split_join_bits :
+ forall s m e,
+ (0 <= m < Zpower 2 mw)%Z ->
+ (0 <= e < Zpower 2 ew)%Z ->
+ split_bits (join_bits s m e) = (s, m, e).
+Proof.
+intros s m e Hm He.
+unfold split_bits, join_bits.
+apply f_equal2.
+apply f_equal2.
+(* *)
+case s.
+apply Zle_bool_true.
+apply Zle_0_minus_le.
+ring_simplify.
+apply Zplus_le_0_compat.
+apply Zmult_le_0_compat.
+apply He.
+now apply Zlt_le_weak.
+apply Hm.
+apply Zle_bool_false.
+apply Zplus_lt_reg_l with (2^mw * (-e))%Z.
+replace (2 ^ mw * - e + ((0 + e) * 2 ^ mw + m))%Z with (m * 1)%Z by ring.
+rewrite <- Zmult_plus_distr_r.
+apply Zlt_le_trans with (2^mw * 1)%Z.
+now apply Zmult_lt_compat_r.
+apply Zmult_le_compat_l.
+clear -He. omega.
+now apply Zlt_le_weak.
+(* *)
+rewrite Zplus_comm.
+rewrite Z_mod_plus_full.
+now apply Zmod_small.
+(* *)
+rewrite Z_div_plus_full_l.
+rewrite Zdiv_small with (1 := Hm).
+rewrite Zplus_0_r.
+case s.
+replace (2^ew + e)%Z with (e + 1 * 2^ew)%Z by ring.
+rewrite Z_mod_plus_full.
+now apply Zmod_small.
+now apply Zmod_small.
+now apply Zgt_not_eq.
+Qed.
+
+Theorem join_split_bits :
+ forall x,
+ (0 <= x < Zpower 2 (mw + ew + 1))%Z ->
+ let '(s, m, e) := split_bits x in
+ join_bits s m e = x.
+Proof.
+intros x Hx.
+unfold split_bits, join_bits.
+pattern x at 4 ; rewrite Z_div_mod_eq_full with x (2^mw)%Z.
+apply (f_equal (fun v => (v + _)%Z)).
+rewrite Zmult_comm.
+apply f_equal.
+pattern (x / (2^mw))%Z at 2 ; rewrite Z_div_mod_eq_full with (x / (2^mw))%Z (2^ew)%Z.
+apply (f_equal (fun v => (v + _)%Z)).
+replace (x / 2 ^ mw / 2 ^ ew)%Z with (if Zle_bool (2 ^ mw * 2 ^ ew) x then 1 else 0)%Z.
+case Zle_bool.
+now rewrite Zmult_1_r.
+now rewrite Zmult_0_r.
+rewrite Zdiv_Zdiv.
+apply sym_eq.
+case Zle_bool_spec ; intros Hs.
+apply Zle_antisym.
+cut (x / (2^mw * 2^ew) < 2)%Z. clear ; omega.
+apply Zdiv_lt_upper_bound.
+try apply Hx. (* 8.2/8.3 compatibility *)
+now apply Zmult_lt_0_compat.
+rewrite <- Zpower_exp ; try ( apply Zle_ge ; apply Zlt_le_weak ; assumption ).
+change 2%Z at 1 with (Zpower 2 1).
+rewrite <- Zpower_exp.
+now rewrite Zplus_comm.
+discriminate.
+apply Zle_ge.
+now apply Zplus_le_0_compat ; apply Zlt_le_weak.
+apply Zdiv_le_lower_bound.
+try apply Hx. (* 8.2/8.3 compatibility *)
+now apply Zmult_lt_0_compat.
+now rewrite Zmult_1_l.
+apply Zdiv_small.
+now split.
+now apply Zlt_le_weak.
+now apply Zlt_le_weak.
+now apply Zgt_not_eq.
+now apply Zgt_not_eq.
+Qed.
+
+Theorem split_bits_inj :
+ forall x y,
+ (0 <= x < Zpower 2 (mw + ew + 1))%Z ->
+ (0 <= y < Zpower 2 (mw + ew + 1))%Z ->
+ split_bits x = split_bits y ->
+ x = y.
+Proof.
+intros x y Hx Hy.
+generalize (join_split_bits x Hx) (join_split_bits y Hy).
+destruct (split_bits x) as ((sx, mx), ex).
+destruct (split_bits y) as ((sy, my), ey).
+intros Jx Jy H. revert Jx Jy.
+inversion_clear H.
+intros Jx Jy.
+now rewrite <- Jx.
+Qed.
+
+Definition bits_of_binary_float (x : binary_float) :=
+ match x with
+ | B754_zero sx => join_bits sx 0 0
+ | B754_infinity sx => join_bits sx 0 (Zpower 2 ew - 1)
+ | B754_nan => join_bits false (Zpower 2 mw - 1) (Zpower 2 ew - 1)
+ | B754_finite sx mx ex _ =>
+ if Zle_bool (Zpower 2 mw) (Zpos mx) then
+ join_bits sx (Zpos mx - Zpower 2 mw) (ex - emin + 1)
+ else
+ join_bits sx (Zpos mx) 0
+ end.
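+
+(* Illustrative example: with mw = 23 and ew = 8 (hence prec = 24, emin = -149),
+   the value 1 is represented as B754_finite false 8388608 (-23); since
+   2^23 <= 8388608, it is encoded as join_bits false 0 (-23 - emin + 1)
+   = join_bits false 0 127, i.e. the familiar bit pattern 0x3F800000.
+   The smallest subnormal B754_finite false 1 (-149) takes the other branch
+   and is encoded as join_bits false 1 0 = 1. *)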
+
+Definition split_bits_of_binary_float (x : binary_float) :=
+ match x with
+ | B754_zero sx => (sx, 0, 0)%Z
+ | B754_infinity sx => (sx, 0, Zpower 2 ew - 1)%Z
+ | B754_nan => (false, Zpower 2 mw - 1, Zpower 2 ew - 1)%Z
+ | B754_finite sx mx ex _ =>
+ if Zle_bool (Zpower 2 mw) (Zpos mx) then
+ (sx, Zpos mx - Zpower 2 mw, ex - emin + 1)%Z
+ else
+ (sx, Zpos mx, 0)%Z
+ end.
+
+Theorem split_bits_of_binary_float_correct :
+ forall x,
+ split_bits (bits_of_binary_float x) = split_bits_of_binary_float x.
+Proof.
+intros [sx|sx| |sx mx ex Hx] ;
+ try ( simpl ; apply split_join_bits ; split ; try apply Zle_refl ; try apply Zlt_pred ; trivial ; omega ).
+unfold bits_of_binary_float, split_bits_of_binary_float.
+assert (Hf: (emin <= ex /\ Zdigits radix2 (Zpos mx) <= prec)%Z).
+destruct (andb_prop _ _ Hx) as (Hx', _).
+unfold canonic_mantissa in Hx'.
+rewrite Z_of_nat_S_digits2_Pnat in Hx'.
+generalize (Zeq_bool_eq _ _ Hx').
+unfold FLT_exp.
+change (Fcalc_digits.radix2) with radix2.
+unfold emin.
+clear ; zify ; omega.
+destruct (Zle_bool_spec (2^mw) (Zpos mx)) as [H|H] ;
+ apply split_join_bits ; try now split.
+(* *)
+split.
+clear -He_gt_0 H ; omega.
+cut (Zpos mx < 2 * 2^mw)%Z. clear ; omega.
+replace (2 * 2^mw)%Z with (2^prec)%Z.
+apply (Zpower_gt_Zdigits radix2 _ (Zpos mx)).
+apply Hf.
+unfold prec.
+rewrite Zplus_comm.
+apply Zpower_exp ; apply Zle_ge.
+discriminate.
+now apply Zlt_le_weak.
+(* *)
+split.
+generalize (proj1 Hf).
+clear ; omega.
+destruct (andb_prop _ _ Hx) as (_, Hx').
+unfold emin.
+replace (2^ew)%Z with (2 * emax)%Z.
+generalize (Zle_bool_imp_le _ _ Hx').
+clear ; omega.
+apply sym_eq.
+rewrite (Zsucc_pred ew).
+unfold Zsucc.
+rewrite Zplus_comm.
+apply Zpower_exp ; apply Zle_ge.
+discriminate.
+now apply Zlt_0_le_0_pred.
+Qed.
+
+Definition binary_float_of_bits_aux x :=
+ let '(sx, mx, ex) := split_bits x in
+ if Zeq_bool ex 0 then
+ match mx with
+ | Z0 => F754_zero sx
+ | Zpos px => F754_finite sx px emin
+ | Zneg _ => F754_nan (* dummy *)
+ end
+ else if Zeq_bool ex (Zpower 2 ew - 1) then
+ if Zeq_bool mx 0 then F754_infinity sx else F754_nan
+ else
+ match (mx + Zpower 2 mw)%Z with
+ | Zpos px => F754_finite sx px (ex + emin - 1)
+ | _ => F754_nan (* dummy *)
+ end.
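+
+(* Illustrative example: with mw = 23 and ew = 8, the bit pattern 1 splits
+   into (false, 1, 0); the exponent field is 0, so the subnormal branch
+   yields F754_finite false 1 emin = F754_finite false 1 (-149).
+   The pattern 0x3F800000 splits into (false, 0, 127): the exponent field is
+   neither 0 nor 2^8 - 1, so the implicit leading bit is restored, giving
+   F754_finite false 8388608 (127 + emin - 1) = F754_finite false 8388608 (-23),
+   i.e. the value 1.  An exponent field of 2^8 - 1 yields an infinity when the
+   mantissa field is 0 and a NaN otherwise. *)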
+
+Lemma binary_float_of_bits_aux_correct :
+ forall x,
+ valid_binary prec emax (binary_float_of_bits_aux x) = true.
+Proof.
+intros x.
+unfold binary_float_of_bits_aux, split_bits.
+case Zeq_bool_spec ; intros He1.
+case_eq (x mod 2^mw)%Z ; try easy.
+(* subnormal *)
+intros px Hm.
+assert (Zdigits radix2 (Zpos px) <= mw)%Z.
+apply Zdigits_le_Zpower.
+simpl.
+rewrite <- Hm.
+eapply Z_mod_lt.
+now apply Zlt_gt.
+apply bounded_canonic_lt_emax ; try assumption.
+unfold canonic, canonic_exp.
+fold emin.
+rewrite ln_beta_F2R_Zdigits. 2: discriminate.
+unfold Fexp, FLT_exp.
+apply sym_eq.
+apply Zmax_right.
+clear -H Hprec.
+unfold prec ; omega.
+apply Rnot_le_lt.
+intros H0.
+refine (_ (ln_beta_le radix2 _ _ _ H0)).
+rewrite ln_beta_bpow.
+rewrite ln_beta_F2R_Zdigits. 2: discriminate.
+unfold emin, prec.
+apply Zlt_not_le.
+cut (0 < emax)%Z. clear -H Hew ; omega.
+apply (Zpower_gt_0 radix2).
+clear -Hew ; omega.
+apply bpow_gt_0.
+case Zeq_bool_spec ; intros He2.
+now case Zeq_bool.
+case_eq (x mod 2^mw + 2^mw)%Z ; try easy.
+(* normal *)
+intros px Hm.
+assert (prec = Zdigits radix2 (Zpos px)).
+(* . *)
+rewrite Zdigits_ln_beta. 2: discriminate.
+apply sym_eq.
+apply ln_beta_unique.
+rewrite <- Z2R_abs.
+unfold Zabs.
+replace (prec - 1)%Z with mw by ( unfold prec ; ring ).
+rewrite <- Z2R_Zpower with (1 := Zlt_le_weak _ _ Hmw).
+rewrite <- Z2R_Zpower. 2: now apply Zlt_le_weak.
+rewrite <- Hm.
+split.
+apply Z2R_le.
+change (radix2^mw)%Z with (0 + 2^mw)%Z.
+apply Zplus_le_compat_r.
+eapply Z_mod_lt.
+now apply Zlt_gt.
+apply Z2R_lt.
+unfold prec.
+rewrite Zpower_exp. 2: now apply Zle_ge ; apply Zlt_le_weak. 2: discriminate.
+rewrite <- Zplus_diag_eq_mult_2.
+apply Zplus_lt_compat_r.
+eapply Z_mod_lt.
+now apply Zlt_gt.
+(* . *)
+apply bounded_canonic_lt_emax ; try assumption.
+unfold canonic, canonic_exp.
+rewrite ln_beta_F2R_Zdigits. 2: discriminate.
+unfold Fexp, FLT_exp.
+rewrite <- H.
+set (ex := ((x / 2^mw) mod 2^ew)%Z).
+replace (prec + (ex + emin - 1) - prec)%Z with (ex + emin - 1)%Z by ring.
+apply sym_eq.
+apply Zmax_left.
+revert He1.
+fold ex.
+cut (0 <= ex)%Z.
+unfold emin.
+clear ; intros H1 H2 ; omega.
+eapply Z_mod_lt.
+apply Zlt_gt.
+apply (Zpower_gt_0 radix2).
+now apply Zlt_le_weak.
+apply Rnot_le_lt.
+intros H0.
+refine (_ (ln_beta_le radix2 _ _ _ H0)).
+rewrite ln_beta_bpow.
+rewrite ln_beta_F2R_Zdigits. 2: discriminate.
+rewrite <- H.
+apply Zlt_not_le.
+unfold emin.
+apply Zplus_lt_reg_r with (emax - 1)%Z.
+ring_simplify.
+revert He2.
+set (ex := ((x / 2^mw) mod 2^ew)%Z).
+cut (ex < 2^ew)%Z.
+replace (2^ew)%Z with (2 * emax)%Z.
+clear ; intros H1 H2 ; omega.
+replace ew with (1 + (ew - 1))%Z by ring.
+rewrite Zpower_exp.
+apply refl_equal.
+discriminate.
+clear -Hew ; omega.
+eapply Z_mod_lt.
+apply Zlt_gt.
+apply (Zpower_gt_0 radix2).
+now apply Zlt_le_weak.
+apply bpow_gt_0.
+Qed.
+
+Definition binary_float_of_bits x :=
+ FF2B prec emax _ (binary_float_of_bits_aux_correct x).
+
+Theorem binary_float_of_bits_of_binary_float :
+ forall x,
+ binary_float_of_bits (bits_of_binary_float x) = x.
+Proof.
+intros x.
+apply B2FF_inj.
+unfold binary_float_of_bits.
+rewrite B2FF_FF2B.
+unfold binary_float_of_bits_aux.
+rewrite split_bits_of_binary_float_correct.
+destruct x as [sx|sx| |sx mx ex Bx].
+apply refl_equal.
+(* *)
+simpl.
+rewrite Zeq_bool_false.
+now rewrite Zeq_bool_true.
+cut (1 < 2^ew)%Z. clear ; omega.
+now apply (Zpower_gt_1 radix2).
+(* *)
+simpl.
+rewrite Zeq_bool_false.
+rewrite Zeq_bool_true.
+rewrite Zeq_bool_false.
+apply refl_equal.
+cut (1 < 2^mw)%Z. clear ; omega.
+now apply (Zpower_gt_1 radix2).
+apply refl_equal.
+cut (1 < 2^ew)%Z. clear ; omega.
+now apply (Zpower_gt_1 radix2).
+(* *)
+unfold split_bits_of_binary_float.
+case Zle_bool_spec ; intros Hm.
+(* . *)
+rewrite Zeq_bool_false.
+rewrite Zeq_bool_false.
+now ring_simplify (Zpos mx - 2 ^ mw + 2 ^ mw)%Z (ex - emin + 1 + emin - 1)%Z.
+destruct (andb_prop _ _ Bx) as (_, H1).
+generalize (Zle_bool_imp_le _ _ H1).
+unfold emin.
+replace (2^ew)%Z with (2 * emax)%Z.
+clear ; omega.
+replace ew with (1 + (ew - 1))%Z by ring.
+rewrite Zpower_exp.
+apply refl_equal.
+discriminate.
+clear -Hew ; omega.
+destruct (andb_prop _ _ Bx) as (H1, _).
+generalize (Zeq_bool_eq _ _ H1).
+rewrite Z_of_nat_S_digits2_Pnat.
+unfold FLT_exp, emin.
+change Fcalc_digits.radix2 with radix2.
+generalize (Zdigits radix2 (Zpos mx)).
+clear.
+intros ; zify ; omega.
+(* . *)
+rewrite Zeq_bool_true. 2: apply refl_equal.
+simpl.
+apply f_equal.
+destruct (andb_prop _ _ Bx) as (H1, _).
+generalize (Zeq_bool_eq _ _ H1).
+rewrite Z_of_nat_S_digits2_Pnat.
+unfold FLT_exp, emin, prec.
+change Fcalc_digits.radix2 with radix2.
+generalize (Zdigits_le_Zpower radix2 _ (Zpos mx) Hm).
+generalize (Zdigits radix2 (Zpos mx)).
+clear.
+intros ; zify ; omega.
+Qed.
+
+Theorem bits_of_binary_float_of_bits :
+ forall x,
+ (0 <= x < 2^(mw+ew+1))%Z ->
+ binary_float_of_bits x <> B754_nan prec emax ->
+ bits_of_binary_float (binary_float_of_bits x) = x.
+Proof.
+intros x Hx.
+unfold binary_float_of_bits, bits_of_binary_float.
+set (Cx := binary_float_of_bits_aux_correct x).
+clearbody Cx.
+rewrite match_FF2B.
+revert Cx.
+generalize (join_split_bits x Hx).
+unfold binary_float_of_bits_aux.
+case_eq (split_bits x).
+intros (sx, mx) ex Sx.
+assert (Bm: (0 <= mx < 2^mw)%Z).
+inversion_clear Sx.
+apply Z_mod_lt.
+now apply Zlt_gt.
+case Zeq_bool_spec ; intros He1.
+(* subnormal *)
+case_eq mx.
+intros Hm Jx _ _.
+now rewrite He1 in Jx.
+intros px Hm Jx _ _.
+rewrite Zle_bool_false.
+now rewrite <- He1.
+now rewrite <- Hm.
+intros px Hm _ _ _.
+apply False_ind.
+apply Zle_not_lt with (1 := proj1 Bm).
+now rewrite Hm.
+case Zeq_bool_spec ; intros He2.
+(* infinity/nan *)
+case Zeq_bool_spec ; intros Hm.
+now rewrite Hm, He2.
+intros _ Cx Nx.
+now elim Nx.
+(* normal *)
+case_eq (mx + 2 ^ mw)%Z.
+intros Hm.
+apply False_ind.
+clear -Bm Hm ; omega.
+intros p Hm Jx Cx _.
+rewrite <- Hm.
+rewrite Zle_bool_true.
+now ring_simplify (mx + 2^mw - 2^mw)%Z (ex + emin - 1 - emin + 1)%Z.
+now apply (Zplus_le_compat_r 0).
+intros p Hm.
+apply False_ind.
+clear -Bm Hm ; zify ; omega.
+Qed.
+
+End Binary_Bits.
+
+(** Specialization for IEEE single precision operations *)
+Section B32_Bits.
+
+Definition binary32 := binary_float 24 128.
+
+Let Hprec : (0 < 24)%Z.
+apply refl_equal.
+Qed.
+
+Let Hprec_emax : (24 < 128)%Z.
+apply refl_equal.
+Qed.
+
+Definition b32_opp := Bopp 24 128.
+Definition b32_plus := Bplus _ _ Hprec Hprec_emax.
+Definition b32_minus := Bminus _ _ Hprec Hprec_emax.
+Definition b32_mult := Bmult _ _ Hprec Hprec_emax.
+Definition b32_div := Bdiv _ _ Hprec Hprec_emax.
+Definition b32_sqrt := Bsqrt _ _ Hprec Hprec_emax.
+
+Definition b32_of_bits : Z -> binary32 := binary_float_of_bits 23 8 (refl_equal _) (refl_equal _) (refl_equal _).
+Definition bits_of_b32 : binary32 -> Z := bits_of_binary_float 23 8.
+
+End B32_Bits.
+
+(** Specialization for IEEE double precision operations *)
+Section B64_Bits.
+
+Definition binary64 := binary_float 53 1024.
+
+Let Hprec : (0 < 53)%Z.
+apply refl_equal.
+Qed.
+
+Let Hprec_emax : (53 < 1024)%Z.
+apply refl_equal.
+Qed.
+
+Definition b64_opp := Bopp 53 1024.
+Definition b64_plus := Bplus _ _ Hprec Hprec_emax.
+Definition b64_minus := Bminus _ _ Hprec Hprec_emax.
+Definition b64_mult := Bmult _ _ Hprec Hprec_emax.
+Definition b64_div := Bdiv _ _ Hprec Hprec_emax.
+Definition b64_sqrt := Bsqrt _ _ Hprec Hprec_emax.
+
+Definition b64_of_bits : Z -> binary64 := binary_float_of_bits 52 11 (refl_equal _) (refl_equal _) (refl_equal _).
+Definition bits_of_b64 : binary64 -> Z := bits_of_binary_float 52 11.
+
+End B64_Bits.
diff --git a/flocq/Calc/Fcalc_bracket.v b/flocq/Calc/Fcalc_bracket.v
new file mode 100644
index 0000000..dd4bd97
--- /dev/null
+++ b/flocq/Calc/Fcalc_bracket.v
@@ -0,0 +1,688 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Locations: where a real number is positioned with respect to its rounded-down value in an arbitrary format. *)
+
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_float_prop.
+
+Section Fcalc_bracket.
+
+Variable d u : R.
+Hypothesis Hdu : (d < u)%R.
+
+Inductive location := loc_Exact | loc_Inexact : comparison -> location.
+
+Variable x : R.
+
+Definition inbetween_loc :=
+ match Rcompare x d with
+ | Gt => loc_Inexact (Rcompare x ((d + u) / 2))
+ | _ => loc_Exact
+ end.
+
+(** Locates a real number with respect to the middle of two other numbers. *)
+
+Inductive inbetween : location -> Prop :=
+ | inbetween_Exact : x = d -> inbetween loc_Exact
+ | inbetween_Inexact l : (d < x < u)%R -> Rcompare x ((d + u) / 2)%R = l -> inbetween (loc_Inexact l).
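+
+(* For instance (illustration only), with d = 0 and u = 1: the point x = 3/4
+   satisfies inbetween (loc_Inexact Gt), since 0 < 3/4 < 1 and 3/4 is above
+   the midpoint 1/2, while x = 0 satisfies inbetween loc_Exact; inbetween_loc
+   computes exactly this location, as [inbetween_spec] states below. *)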
+
+Theorem inbetween_spec :
+ (d <= x < u)%R -> inbetween inbetween_loc.
+Proof.
+intros Hx.
+unfold inbetween_loc.
+destruct (Rcompare_spec x d) as [H|H|H].
+now elim Rle_not_lt with (1 := proj1 Hx).
+now constructor.
+constructor.
+now split.
+easy.
+Qed.
+
+Theorem inbetween_unique :
+ forall l l',
+ inbetween l -> inbetween l' -> l = l'.
+Proof.
+intros l l' Hl Hl'.
+inversion_clear Hl ; inversion_clear Hl'.
+apply refl_equal.
+rewrite H in H0.
+elim Rlt_irrefl with (1 := proj1 H0).
+rewrite H1 in H.
+elim Rlt_irrefl with (1 := proj1 H).
+apply f_equal.
+now rewrite <- H0.
+Qed.
+
+Section Fcalc_bracket_any.
+
+Variable l : location.
+
+Theorem inbetween_bounds :
+ inbetween l ->
+ (d <= x < u)%R.
+Proof.
+intros [Hx|l' Hx Hl] ; clear l.
+rewrite Hx.
+split.
+apply Rle_refl.
+exact Hdu.
+now split ; try apply Rlt_le.
+Qed.
+
+Theorem inbetween_bounds_not_Eq :
+ inbetween l ->
+ l <> loc_Exact ->
+ (d < x < u)%R.
+Proof.
+intros [Hx|l' Hx Hl] H.
+now elim H.
+exact Hx.
+Qed.
+
+End Fcalc_bracket_any.
+
+Theorem inbetween_distance_inexact :
+ forall l,
+ inbetween (loc_Inexact l) ->
+ Rcompare (x - d) (u - x) = l.
+Proof.
+intros l Hl.
+inversion_clear Hl as [|l' Hl' Hx].
+now rewrite Rcompare_middle.
+Qed.
+
+Theorem inbetween_distance_inexact_abs :
+ forall l,
+ inbetween (loc_Inexact l) ->
+ Rcompare (Rabs (d - x)) (Rabs (u - x)) = l.
+Proof.
+intros l Hl.
+rewrite Rabs_left1.
+rewrite Rabs_pos_eq.
+rewrite Ropp_minus_distr.
+now apply inbetween_distance_inexact.
+apply Rle_0_minus.
+apply Rlt_le.
+apply (inbetween_bounds _ Hl).
+apply Rle_minus.
+apply (inbetween_bounds _ Hl).
+Qed.
+
+End Fcalc_bracket.
+
+Theorem inbetween_ex :
+ forall d u l,
+ (d < u)%R ->
+ exists x,
+ inbetween d u x l.
+Proof.
+intros d u [|l] Hdu.
+exists d.
+now constructor.
+exists (d + match l with Lt => 1 | Eq => 2 | Gt => 3 end / 4 * (u - d))%R.
+constructor.
+(* *)
+set (v := (match l with Lt => 1 | Eq => 2 | Gt => 3 end / 4)%R).
+assert (0 < v < 1)%R.
+split.
+unfold v, Rdiv.
+apply Rmult_lt_0_compat.
+case l.
+now apply (Z2R_lt 0 2).
+now apply (Z2R_lt 0 1).
+now apply (Z2R_lt 0 3).
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 4).
+unfold v, Rdiv.
+apply Rmult_lt_reg_r with 4%R.
+now apply (Z2R_lt 0 4).
+rewrite Rmult_assoc, Rinv_l.
+rewrite Rmult_1_r, Rmult_1_l.
+case l.
+now apply (Z2R_lt 2 4).
+now apply (Z2R_lt 1 4).
+now apply (Z2R_lt 3 4).
+apply Rgt_not_eq.
+now apply (Z2R_lt 0 4).
+split.
+apply Rplus_lt_reg_r with (d * (v - 1))%R.
+ring_simplify.
+rewrite Rmult_comm.
+now apply Rmult_lt_compat_l.
+apply Rplus_lt_reg_r with (-u * v)%R.
+replace (- u * v + (d + v * (u - d)))%R with (d * (1 - v))%R by ring.
+replace (- u * v + u)%R with (u * (1 - v))%R by ring.
+apply Rmult_lt_compat_r.
+apply Rplus_lt_reg_r with v.
+now ring_simplify.
+exact Hdu.
+(* *)
+set (v := (match l with Lt => 1 | Eq => 2 | Gt => 3 end)%R).
+rewrite <- (Rcompare_plus_r (- (d + u) / 2)).
+rewrite <- (Rcompare_mult_r 4).
+2: now apply (Z2R_lt 0 4).
+replace (((d + u) / 2 + - (d + u) / 2) * 4)%R with ((u - d) * 0)%R by field.
+replace ((d + v / 4 * (u - d) + - (d + u) / 2) * 4)%R with ((u - d) * (v - 2))%R by field.
+rewrite Rcompare_mult_l.
+2: now apply Rlt_Rminus.
+rewrite <- (Rcompare_plus_r 2).
+ring_simplify (v - 2 + 2)%R (0 + 2)%R.
+unfold v.
+case l.
+exact (Rcompare_Z2R 2 2).
+exact (Rcompare_Z2R 1 2).
+exact (Rcompare_Z2R 3 2).
+Qed.
+
+Section Fcalc_bracket_step.
+
+Variable start step : R.
+Variable nb_steps : Z.
+Variable Hstep : (0 < step)%R.
+
+Lemma ordered_steps :
+ forall k,
+ (start + Z2R k * step < start + Z2R (k + 1) * step)%R.
+Proof.
+intros k.
+apply Rplus_lt_compat_l.
+apply Rmult_lt_compat_r.
+exact Hstep.
+apply Z2R_lt.
+apply Zlt_succ.
+Qed.
+
+Lemma middle_range :
+ forall k,
+ ((start + (start + Z2R k * step)) / 2 = start + (Z2R k / 2 * step))%R.
+Proof.
+intros k.
+field.
+Qed.
+
+Hypothesis (Hnb_steps : (1 < nb_steps)%Z).
+
+Lemma inbetween_step_not_Eq :
+ forall x k l l',
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ (0 < k < nb_steps)%Z ->
+ Rcompare x (start + (Z2R nb_steps / 2 * step))%R = l' ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact l').
+Proof.
+intros x k l l' Hx Hk Hl'.
+constructor.
+(* . *)
+assert (Hx' := inbetween_bounds _ _ (ordered_steps _) _ _ Hx).
+split.
+apply Rlt_le_trans with (2 := proj1 Hx').
+rewrite <- (Rplus_0_r start) at 1.
+apply Rplus_lt_compat_l.
+apply Rmult_lt_0_compat.
+now apply (Z2R_lt 0).
+exact Hstep.
+apply Rlt_le_trans with (1 := proj2 Hx').
+apply Rplus_le_compat_l.
+apply Rmult_le_compat_r.
+now apply Rlt_le.
+apply Z2R_le.
+omega.
+(* . *)
+now rewrite middle_range.
+Qed.
+
+Theorem inbetween_step_Lo :
+ forall x k l,
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ (0 < k)%Z -> (2 * k + 1 < nb_steps)%Z ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact Lt).
+Proof.
+intros x k l Hx Hk1 Hk2.
+apply inbetween_step_not_Eq with (1 := Hx).
+omega.
+apply Rcompare_Lt.
+assert (Hx' := inbetween_bounds _ _ (ordered_steps _) _ _ Hx).
+apply Rlt_le_trans with (1 := proj2 Hx').
+apply Rcompare_not_Lt_inv.
+rewrite Rcompare_plus_l, Rcompare_mult_r, Rcompare_half_l.
+apply Rcompare_not_Lt.
+change 2%R with (Z2R 2).
+rewrite <- Z2R_mult.
+apply Z2R_le.
+omega.
+exact Hstep.
+Qed.
+
+Theorem inbetween_step_Hi :
+ forall x k l,
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ (nb_steps < 2 * k)%Z -> (k < nb_steps)%Z ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact Gt).
+Proof.
+intros x k l Hx Hk1 Hk2.
+apply inbetween_step_not_Eq with (1 := Hx).
+omega.
+apply Rcompare_Gt.
+assert (Hx' := inbetween_bounds _ _ (ordered_steps _) _ _ Hx).
+apply Rlt_le_trans with (2 := proj1 Hx').
+apply Rcompare_Lt_inv.
+rewrite Rcompare_plus_l, Rcompare_mult_r, Rcompare_half_l.
+apply Rcompare_Lt.
+change 2%R with (Z2R 2).
+rewrite <- Z2R_mult.
+apply Z2R_lt.
+omega.
+exact Hstep.
+Qed.
+
+Theorem inbetween_step_Lo_not_Eq :
+ forall x l,
+ inbetween start (start + step) x l ->
+ l <> loc_Exact ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact Lt).
+Proof.
+intros x l Hx Hl.
+assert (Hx' := inbetween_bounds_not_Eq _ _ _ _ Hx Hl).
+constructor.
+(* . *)
+split.
+apply Hx'.
+apply Rlt_trans with (1 := proj2 Hx').
+apply Rplus_lt_compat_l.
+rewrite <- (Rmult_1_l step) at 1.
+apply Rmult_lt_compat_r.
+exact Hstep.
+now apply (Z2R_lt 1).
+(* . *)
+apply Rcompare_Lt.
+apply Rlt_le_trans with (1 := proj2 Hx').
+rewrite middle_range.
+apply Rcompare_not_Lt_inv.
+rewrite <- (Rmult_1_l step) at 2.
+rewrite Rcompare_plus_l, Rcompare_mult_r, Rcompare_half_l.
+rewrite Rmult_1_r.
+apply Rcompare_not_Lt.
+apply (Z2R_le 2).
+now apply (Zlt_le_succ 1).
+exact Hstep.
+Qed.
+
+Lemma middle_odd :
+ forall k,
+ (2 * k + 1 = nb_steps)%Z ->
+ (((start + Z2R k * step) + (start + Z2R (k + 1) * step))/2 = start + Z2R nb_steps /2 * step)%R.
+Proof.
+intros k Hk.
+rewrite <- Hk.
+rewrite 2!Z2R_plus, Z2R_mult.
+simpl. field.
+Qed.
+
+Theorem inbetween_step_any_Mi_odd :
+ forall x k l,
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x (loc_Inexact l) ->
+ (2 * k + 1 = nb_steps)%Z ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact l).
+Proof.
+intros x k l Hx Hk.
+apply inbetween_step_not_Eq with (1 := Hx).
+omega.
+inversion_clear Hx as [|l' _ Hl].
+now rewrite (middle_odd _ Hk) in Hl.
+Qed.
+
+Theorem inbetween_step_Lo_Mi_Eq_odd :
+ forall x k,
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x loc_Exact ->
+ (2 * k + 1 = nb_steps)%Z ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact Lt).
+Proof.
+intros x k Hx Hk.
+apply inbetween_step_not_Eq with (1 := Hx).
+omega.
+inversion_clear Hx as [Hl|].
+rewrite Hl.
+rewrite Rcompare_plus_l, Rcompare_mult_r, Rcompare_half_r.
+apply Rcompare_Lt.
+change 2%R with (Z2R 2).
+rewrite <- Z2R_mult.
+apply Z2R_lt.
+rewrite <- Hk.
+apply Zlt_succ.
+exact Hstep.
+Qed.
+
+Theorem inbetween_step_Hi_Mi_even :
+ forall x k l,
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ l <> loc_Exact ->
+ (2 * k = nb_steps)%Z ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact Gt).
+Proof.
+intros x k l Hx Hl Hk.
+apply inbetween_step_not_Eq with (1 := Hx).
+omega.
+apply Rcompare_Gt.
+assert (Hx' := inbetween_bounds_not_Eq _ _ _ _ Hx Hl).
+apply Rle_lt_trans with (2 := proj1 Hx').
+apply Rcompare_not_Lt_inv.
+rewrite Rcompare_plus_l, Rcompare_mult_r, Rcompare_half_r.
+change 2%R with (Z2R 2).
+rewrite <- Z2R_mult.
+apply Rcompare_not_Lt.
+apply Z2R_le.
+rewrite Hk.
+apply Zle_refl.
+exact Hstep.
+Qed.
+
+Theorem inbetween_step_Mi_Mi_even :
+ forall x k,
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x loc_Exact ->
+ (2 * k = nb_steps)%Z ->
+ inbetween start (start + Z2R nb_steps * step) x (loc_Inexact Eq).
+Proof.
+intros x k Hx Hk.
+apply inbetween_step_not_Eq with (1 := Hx).
+omega.
+apply Rcompare_Eq.
+inversion_clear Hx as [Hx'|].
+rewrite Hx', <- Hk, Z2R_mult.
+simpl (Z2R 2).
+field.
+Qed.
+
+(** Computes a new location when the interval containing a real
+ number is split into nb_steps subintervals and the real is
+ in the k-th one. (Even radix.) *)
+
+Definition new_location_even k l :=
+ if Zeq_bool k 0 then
+ match l with loc_Exact => l | _ => loc_Inexact Lt end
+ else
+ loc_Inexact
+ match Zcompare (2 * k) nb_steps with
+ | Lt => Lt
+ | Eq => match l with loc_Exact => Eq | _ => Gt end
+ | Gt => Gt
+ end.
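+
+(* For instance (illustration only), with nb_steps = 4: k = 0 keeps loc_Exact
+   (or becomes loc_Inexact Lt), k = 1 gives loc_Inexact Lt, k = 2 gives
+   loc_Inexact Eq when l = loc_Exact (the real is exactly the midpoint of the
+   whole interval) and loc_Inexact Gt otherwise, and k = 3 gives
+   loc_Inexact Gt. *)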
+
+Theorem new_location_even_correct :
+ Zeven nb_steps = true ->
+ forall x k l, (0 <= k < nb_steps)%Z ->
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ inbetween start (start + Z2R nb_steps * step) x (new_location_even k l).
+Proof.
+intros He x k l Hk Hx.
+unfold new_location_even.
+destruct (Zeq_bool_spec k 0) as [Hk0|Hk0].
+(* k = 0 *)
+rewrite Hk0 in Hx.
+rewrite Rmult_0_l, Rplus_0_r, Rmult_1_l in Hx.
+set (l' := match l with loc_Exact => l | _ => loc_Inexact Lt end).
+assert ((l = loc_Exact /\ l' = loc_Exact) \/ (l <> loc_Exact /\ l' = loc_Inexact Lt)).
+unfold l' ; case l ; try (now left) ; right ; now split.
+destruct H as [(H1,H2)|(H1,H2)] ; rewrite H2.
+constructor.
+rewrite H1 in Hx.
+now inversion_clear Hx.
+now apply inbetween_step_Lo_not_Eq with (2 := H1).
+(* k <> 0 *)
+destruct (Zcompare_spec (2 * k) nb_steps) as [Hk1|Hk1|Hk1].
+(* . 2 * k < nb_steps *)
+apply inbetween_step_Lo with (1 := Hx).
+omega.
+destruct (Zeven_ex nb_steps).
+rewrite He in H.
+omega.
+(* . 2 * k = nb_steps *)
+set (l' := match l with loc_Exact => Eq | _ => Gt end).
+assert ((l = loc_Exact /\ l' = Eq) \/ (l <> loc_Exact /\ l' = Gt)).
+unfold l' ; case l ; try (now left) ; right ; now split.
+destruct H as [(H1,H2)|(H1,H2)] ; rewrite H2.
+rewrite H1 in Hx.
+now apply inbetween_step_Mi_Mi_even with (1 := Hx).
+now apply inbetween_step_Hi_Mi_even with (1 := Hx).
+(* . 2 * k > nb_steps *)
+apply inbetween_step_Hi with (1 := Hx).
+exact Hk1.
+apply Hk.
+Qed.
+
+(** Computes a new location when the interval containing a real
+ number is split into nb_steps subintervals and the real is
+ in the k-th one. (Odd radix.) *)
+
+Definition new_location_odd k l :=
+ if Zeq_bool k 0 then
+ match l with loc_Exact => l | _ => loc_Inexact Lt end
+ else
+ loc_Inexact
+ match Zcompare (2 * k + 1) nb_steps with
+ | Lt => Lt
+ | Eq => match l with loc_Inexact l => l | loc_Exact => Lt end
+ | Gt => Gt
+ end.
+
+Theorem new_location_odd_correct :
+ Zeven nb_steps = false ->
+ forall x k l, (0 <= k < nb_steps)%Z ->
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ inbetween start (start + Z2R nb_steps * step) x (new_location_odd k l).
+Proof.
+intros Ho x k l Hk Hx.
+unfold new_location_odd.
+destruct (Zeq_bool_spec k 0) as [Hk0|Hk0].
+(* k = 0 *)
+rewrite Hk0 in Hx.
+rewrite Rmult_0_l, Rplus_0_r, Rmult_1_l in Hx.
+set (l' := match l with loc_Exact => l | _ => loc_Inexact Lt end).
+assert ((l = loc_Exact /\ l' = loc_Exact) \/ (l <> loc_Exact /\ l' = loc_Inexact Lt)).
+unfold l' ; case l ; try (now left) ; right ; now split.
+destruct H as [(H1,H2)|(H1,H2)] ; rewrite H2.
+constructor.
+rewrite H1 in Hx.
+now inversion_clear Hx.
+now apply inbetween_step_Lo_not_Eq with (2 := H1).
+(* k <> 0 *)
+destruct (Zcompare_spec (2 * k + 1) nb_steps) as [Hk1|Hk1|Hk1].
+(* . 2 * k + 1 < nb_steps *)
+apply inbetween_step_Lo with (1 := Hx) (3 := Hk1).
+omega.
+(* . 2 * k + 1 = nb_steps *)
+destruct l.
+apply inbetween_step_Lo_Mi_Eq_odd with (1 := Hx) (2 := Hk1).
+apply inbetween_step_any_Mi_odd with (1 := Hx) (2 := Hk1).
+(* . 2 * k + 1 > nb_steps *)
+apply inbetween_step_Hi with (1 := Hx).
+destruct (Zeven_ex nb_steps).
+rewrite Ho in H.
+omega.
+apply Hk.
+Qed.
+
+Definition new_location :=
+ if Zeven nb_steps then new_location_even else new_location_odd.
+
+Theorem new_location_correct :
+ forall x k l, (0 <= k < nb_steps)%Z ->
+ inbetween (start + Z2R k * step) (start + Z2R (k + 1) * step) x l ->
+ inbetween start (start + Z2R nb_steps * step) x (new_location k l).
+Proof.
+intros x k l Hk Hx.
+unfold new_location.
+generalize (refl_equal nb_steps) (Zle_lt_trans _ _ _ (proj1 Hk) (proj2 Hk)).
+pattern nb_steps at 2 3 5 ; case nb_steps.
+now intros _.
+(* . *)
+intros [p|p|] Hp _.
+apply new_location_odd_correct with (2 := Hk) (3 := Hx).
+now rewrite Hp.
+apply new_location_even_correct with (2 := Hk) (3 := Hx).
+now rewrite Hp.
+now rewrite Hp in Hnb_steps.
+(* . *)
+now intros p _.
+Qed.
+
+End Fcalc_bracket_step.
+
+Section Fcalc_bracket_scale.
+
+Lemma inbetween_mult_aux :
+ forall x d s,
+ ((x * s + d * s) / 2 = (x + d) / 2 * s)%R.
+Proof.
+intros x d s.
+field.
+Qed.
+
+Theorem inbetween_mult_compat :
+ forall x d u l s,
+ (0 < s)%R ->
+ inbetween x d u l ->
+ inbetween (x * s) (d * s) (u * s) l.
+Proof.
+intros x d u l s Hs [Hx|l' Hx Hl] ; constructor.
+now rewrite Hx.
+now split ; apply Rmult_lt_compat_r.
+rewrite inbetween_mult_aux.
+now rewrite Rcompare_mult_r.
+Qed.
+
+Theorem inbetween_mult_reg :
+ forall x d u l s,
+ (0 < s)%R ->
+ inbetween (x * s) (d * s) (u * s) l ->
+ inbetween x d u l.
+Proof.
+intros x d u l s Hs [Hx|l' Hx Hl] ; constructor.
+apply Rmult_eq_reg_r with (1 := Hx).
+now apply Rgt_not_eq.
+now split ; apply Rmult_lt_reg_r with s.
+rewrite <- Rcompare_mult_r with (1 := Hs).
+now rewrite inbetween_mult_aux in Hl.
+Qed.
+
+End Fcalc_bracket_scale.
+
+Section Fcalc_bracket_generic.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+(** Specialization of inbetween for two consecutive floating-point numbers. *)
+
+Definition inbetween_float m e x l :=
+ inbetween (F2R (Float beta m e)) (F2R (Float beta (m + 1) e)) x l.
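+
+(* For instance (illustration only), in radix 2, inbetween_float 10 (-5) x
+   (loc_Inexact Gt) states that 10*2^(-5) < x < 11*2^(-5) and that x lies
+   above the midpoint 21*2^(-6); this is the shape of information carried
+   around by the rounding algorithms built on top of this file. *)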
+
+Theorem inbetween_float_bounds :
+ forall x m e l,
+ inbetween_float m e x l ->
+ (F2R (Float beta m e) <= x < F2R (Float beta (m + 1) e))%R.
+Proof.
+intros x m e l [Hx|l' Hx Hl].
+rewrite Hx.
+split.
+apply Rle_refl.
+apply F2R_lt_compat.
+apply Zlt_succ.
+split.
+now apply Rlt_le.
+apply Hx.
+Qed.
+
+(** Specialization of inbetween for two consecutive integers. *)
+
+Definition inbetween_int m x l :=
+ inbetween (Z2R m) (Z2R (m + 1)) x l.
+
+Theorem inbetween_float_new_location :
+ forall x m e l k,
+ (0 < k)%Z ->
+ inbetween_float m e x l ->
+ inbetween_float (Zdiv m (Zpower beta k)) (e + k) x (new_location (Zpower beta k) (Zmod m (Zpower beta k)) l).
+Proof.
+intros x m e l k Hk Hx.
+unfold inbetween_float in *.
+assert (Hr: forall m, F2R (Float beta m (e + k)) = F2R (Float beta (m * Zpower beta k) e)).
+clear -Hk. intros m.
+rewrite (F2R_change_exp beta e).
+apply (f_equal (fun r => F2R (Float beta (m * Zpower _ r) e))).
+ring.
+omega.
+assert (Hp: (Zpower beta k > 0)%Z).
+apply Zlt_gt.
+apply Zpower_gt_0.
+now apply Zlt_le_weak.
+(* . *)
+rewrite 2!Hr.
+rewrite Zmult_plus_distr_l, Zmult_1_l.
+unfold F2R at 2. simpl.
+rewrite Z2R_plus, Rmult_plus_distr_r.
+apply new_location_correct.
+apply bpow_gt_0.
+now apply Zpower_gt_1.
+now apply Z_mod_lt.
+rewrite <- 2!Rmult_plus_distr_r, <- 2!Z2R_plus.
+rewrite Zmult_comm, Zplus_assoc.
+now rewrite <- Z_div_mod_eq.
+Qed.
+
+Theorem inbetween_float_new_location_single :
+ forall x m e l,
+ inbetween_float m e x l ->
+ inbetween_float (Zdiv m beta) (e + 1) x (new_location beta (Zmod m beta) l).
+Proof.
+intros x m e l Hx.
+replace (radix_val beta) with (Zpower beta 1).
+now apply inbetween_float_new_location.
+apply Zmult_1_r.
+Qed.
+
+Theorem inbetween_float_ex :
+ forall m e l,
+ exists x,
+ inbetween_float m e x l.
+Proof.
+intros m e l.
+apply inbetween_ex.
+apply F2R_lt_compat.
+apply Zlt_succ.
+Qed.
+
+Theorem inbetween_float_unique :
+ forall x e m l m' l',
+ inbetween_float m e x l ->
+ inbetween_float m' e x l' ->
+ m = m' /\ l = l'.
+Proof.
+intros x e m l m' l' H H'.
+refine ((fun Hm => conj Hm _) _).
+rewrite <- Hm in H'. clear -H H'.
+apply inbetween_unique with (1 := H) (2 := H').
+destruct (inbetween_float_bounds x m e l H) as (H1,H2).
+destruct (inbetween_float_bounds x m' e l' H') as (H3,H4).
+cut (m < m' + 1 /\ m' < m + 1)%Z. clear ; omega.
+now split ; apply F2R_lt_reg with beta e ; apply Rle_lt_trans with x.
+Qed.
+
+End Fcalc_bracket_generic.
diff --git a/flocq/Calc/Fcalc_digits.v b/flocq/Calc/Fcalc_digits.v
new file mode 100644
index 0000000..6210bac
--- /dev/null
+++ b/flocq/Calc/Fcalc_digits.v
@@ -0,0 +1,382 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Functions for computing the number of digits of integers and related theorems. *)
+
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_float_prop.
+Require Import Fcore_digits.
+
+Section Fcalc_digits.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+
+
+Theorem Zdigits_ln_beta :
+ forall n,
+ n <> Z0 ->
+ Zdigits beta n = ln_beta beta (Z2R n).
+Proof.
+intros n Hn.
+destruct (ln_beta beta (Z2R n)) as (e, He) ; simpl.
+specialize (He (Z2R_neq _ _ Hn)).
+rewrite <- Z2R_abs in He.
+assert (Hd := Zdigits_correct beta n).
+assert (Hd' := Zdigits_gt_0 beta n).
+apply Zle_antisym ; apply (bpow_lt_bpow beta).
+apply Rle_lt_trans with (2 := proj2 He).
+rewrite <- Z2R_Zpower by omega.
+now apply Z2R_le.
+apply Rle_lt_trans with (1 := proj1 He).
+rewrite <- Z2R_Zpower by omega.
+now apply Z2R_lt.
+Qed.
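+
+(* Worked instance (for intuition only): with a radix of value 2 and n = 5,
+   both sides equal 3, since 2^2 <= |5| < 2^3. *)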
+
+Theorem ln_beta_F2R_Zdigits :
+ forall m e, m <> Z0 ->
+ (ln_beta beta (F2R (Float beta m e)) = Zdigits beta m + e :> Z)%Z.
+Proof.
+intros m e Hm.
+rewrite ln_beta_F2R with (1 := Hm).
+apply (f_equal (fun v => Zplus v e)).
+apply sym_eq.
+now apply Zdigits_ln_beta.
+Qed.
+
+Theorem Zdigits_mult_Zpower :
+ forall m e,
+ m <> Z0 -> (0 <= e)%Z ->
+ Zdigits beta (m * Zpower beta e) = (Zdigits beta m + e)%Z.
+Proof.
+intros m e Hm He.
+rewrite <- ln_beta_F2R_Zdigits with (1 := Hm).
+rewrite Zdigits_ln_beta.
+rewrite Z2R_mult.
+now rewrite Z2R_Zpower with (1 := He).
+contradict Hm.
+apply Zmult_integral_l with (2 := Hm).
+apply neq_Z2R.
+rewrite Z2R_Zpower with (1 := He).
+apply Rgt_not_eq.
+apply bpow_gt_0.
+Qed.
+
+Theorem Zdigits_Zpower :
+ forall e,
+ (0 <= e)%Z ->
+ Zdigits beta (Zpower beta e) = (e + 1)%Z.
+Proof.
+intros e He.
+rewrite <- (Zmult_1_l (Zpower beta e)).
+rewrite Zdigits_mult_Zpower ; try easy.
+apply Zplus_comm.
+Qed.
+
+Theorem Zdigits_le :
+ forall x y,
+ (0 <= x)%Z -> (x <= y)%Z ->
+ (Zdigits beta x <= Zdigits beta y)%Z.
+Proof.
+intros x y Hx Hxy.
+case (Z_lt_le_dec 0 x).
+clear Hx. intros Hx.
+rewrite 2!Zdigits_ln_beta.
+apply ln_beta_le.
+now apply (Z2R_lt 0).
+now apply Z2R_le.
+apply Zgt_not_eq.
+now apply Zlt_le_trans with x.
+now apply Zgt_not_eq.
+intros Hx'.
+rewrite (Zle_antisym _ _ Hx' Hx).
+apply Zdigits_ge_0.
+Qed.
+
+Theorem lt_Zdigits :
+ forall x y,
+ (0 <= y)%Z ->
+ (Zdigits beta x < Zdigits beta y)%Z ->
+ (x < y)%Z.
+Proof.
+intros x y Hy.
+cut (y <= x -> Zdigits beta y <= Zdigits beta x)%Z. omega.
+now apply Zdigits_le.
+Qed.
+
+Theorem Zpower_le_Zdigits :
+ forall e x,
+ (e < Zdigits beta x)%Z ->
+ (Zpower beta e <= Zabs x)%Z.
+Proof.
+intros e x Hex.
+destruct (Zdigits_correct beta x) as (H1,H2).
+apply Zle_trans with (2 := H1).
+apply Zpower_le.
+clear -Hex ; omega.
+Qed.
+
+Theorem Zdigits_le_Zpower :
+ forall e x,
+ (Zabs x < Zpower beta e)%Z ->
+ (Zdigits beta x <= e)%Z.
+Proof.
+intros e x.
+generalize (Zpower_le_Zdigits e x).
+omega.
+Qed.
+
+Theorem Zpower_gt_Zdigits :
+ forall e x,
+ (Zdigits beta x <= e)%Z ->
+ (Zabs x < Zpower beta e)%Z.
+Proof.
+intros e x Hex.
+destruct (Zdigits_correct beta x) as (H1,H2).
+apply Zlt_le_trans with (1 := H2).
+now apply Zpower_le.
+Qed.
+
+Theorem Zdigits_gt_Zpower :
+ forall e x,
+ (Zpower beta e <= Zabs x)%Z ->
+ (e < Zdigits beta x)%Z.
+Proof.
+intros e x Hex.
+generalize (Zpower_gt_Zdigits e x).
+omega.
+Qed.
+
+(** Characterizes the number of digits of a product.
+
+This strong version is needed for proofs of division and square root
+algorithms, since they involve the remainders of these operations.
+*)
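+
+(* A quick numeric check of the bound below (radix 10, illustrative only):
+   for x = 99 and y = 9, x + y + x * y = 999 = (x+1)*(y+1) - 1, whose
+   3 digits are indeed at most Zdigits x + Zdigits y = 2 + 1. *)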
+
+Theorem Zdigits_mult_strong :
+ forall x y,
+ (0 <= x)%Z -> (0 <= y)%Z ->
+ (Zdigits beta (x + y + x * y) <= Zdigits beta x + Zdigits beta y)%Z.
+Proof.
+intros x y Hx Hy.
+case (Z_lt_le_dec 0 x).
+clear Hx. intros Hx.
+case (Z_lt_le_dec 0 y).
+clear Hy. intros Hy.
+(* . *)
+assert (Hxy: (0 < Z2R (x + y + x * y))%R).
+apply (Z2R_lt 0).
+change Z0 with (0 + 0 + 0)%Z.
+apply Zplus_lt_compat.
+now apply Zplus_lt_compat.
+now apply Zmult_lt_0_compat.
+rewrite 3!Zdigits_ln_beta ; try now (apply sym_not_eq ; apply Zlt_not_eq).
+apply ln_beta_le_bpow with (1 := Rgt_not_eq _ _ Hxy).
+rewrite Rabs_pos_eq with (1 := Rlt_le _ _ Hxy).
+destruct (ln_beta beta (Z2R x)) as (ex, Hex). simpl.
+specialize (Hex (Rgt_not_eq _ _ (Z2R_lt _ _ Hx))).
+destruct (ln_beta beta (Z2R y)) as (ey, Hey). simpl.
+specialize (Hey (Rgt_not_eq _ _ (Z2R_lt _ _ Hy))).
+apply Rlt_le_trans with (Z2R (x + 1) * Z2R (y + 1))%R.
+rewrite <- Z2R_mult.
+apply Z2R_lt.
+apply Zplus_lt_reg_r with (- (x + y + x * y + 1))%Z.
+now ring_simplify.
+rewrite bpow_plus.
+apply Rmult_le_compat ; try (apply (Z2R_le 0) ; omega).
+rewrite <- (Rmult_1_r (Z2R (x + 1))).
+change (F2R (Float beta (x + 1) 0) <= bpow ex)%R.
+apply F2R_p1_le_bpow.
+exact Hx.
+unfold F2R. rewrite Rmult_1_r.
+apply Rle_lt_trans with (Rabs (Z2R x)).
+apply RRle_abs.
+apply Hex.
+rewrite <- (Rmult_1_r (Z2R (y + 1))).
+change (F2R (Float beta (y + 1) 0) <= bpow ey)%R.
+apply F2R_p1_le_bpow.
+exact Hy.
+unfold F2R. rewrite Rmult_1_r.
+apply Rle_lt_trans with (Rabs (Z2R y)).
+apply RRle_abs.
+apply Hey.
+apply neq_Z2R.
+now apply Rgt_not_eq.
+(* . *)
+intros Hy'.
+rewrite (Zle_antisym _ _ Hy' Hy).
+rewrite Zmult_0_r, 3!Zplus_0_r.
+apply Zle_refl.
+intros Hx'.
+rewrite (Zle_antisym _ _ Hx' Hx).
+rewrite Zmult_0_l, Zplus_0_r, 2!Zplus_0_l.
+apply Zle_refl.
+Qed.
+
+Theorem Zdigits_mult :
+ forall x y,
+ (Zdigits beta (x * y) <= Zdigits beta x + Zdigits beta y)%Z.
+Proof.
+intros x y.
+rewrite <- Zdigits_abs.
+rewrite <- (Zdigits_abs _ x).
+rewrite <- (Zdigits_abs _ y).
+apply Zle_trans with (Zdigits beta (Zabs x + Zabs y + Zabs x * Zabs y)).
+apply Zdigits_le.
+apply Zabs_pos.
+rewrite Zabs_Zmult.
+generalize (Zabs_pos x) (Zabs_pos y).
+omega.
+apply Zdigits_mult_strong ; apply Zabs_pos.
+Qed.
+
+Theorem Zdigits_mult_ge :
+ forall x y,
+ (x <> 0)%Z -> (y <> 0)%Z ->
+ (Zdigits beta x + Zdigits beta y - 1 <= Zdigits beta (x * y))%Z.
+Proof.
+intros x y Zx Zy.
+cut ((Zdigits beta x - 1) + (Zdigits beta y - 1) < Zdigits beta (x * y))%Z. omega.
+apply Zdigits_gt_Zpower.
+rewrite Zabs_Zmult.
+rewrite Zpower_exp.
+apply Zmult_le_compat.
+apply Zpower_le_Zdigits.
+apply Zlt_pred.
+apply Zpower_le_Zdigits.
+apply Zlt_pred.
+apply Zpower_ge_0.
+apply Zpower_ge_0.
+generalize (Zdigits_gt_0 beta x). omega.
+generalize (Zdigits_gt_0 beta y). omega.
+Qed.
+
+Theorem Zdigits_div_Zpower :
+ forall m e,
+ (0 <= m)%Z ->
+ (0 <= e <= Zdigits beta m)%Z ->
+ Zdigits beta (m / Zpower beta e) = (Zdigits beta m - e)%Z.
+Proof.
+intros m e Hm He.
+destruct (Zle_lt_or_eq 0 m Hm) as [Hm'|Hm'].
+(* *)
+destruct (Zle_lt_or_eq _ _ (proj2 He)) as [He'|He'].
+(* . *)
+unfold Zminus.
+rewrite <- ln_beta_F2R_Zdigits.
+2: now apply Zgt_not_eq.
+assert (Hp: (0 < Zpower beta e)%Z).
+apply lt_Z2R.
+rewrite Z2R_Zpower with (1 := proj1 He).
+apply bpow_gt_0.
+rewrite Zdigits_ln_beta.
+rewrite <- Zfloor_div with (1 := Zgt_not_eq _ _ Hp).
+rewrite Z2R_Zpower with (1 := proj1 He).
+unfold Rdiv.
+rewrite <- bpow_opp.
+change (Z2R m * bpow (-e))%R with (F2R (Float beta m (-e))).
+destruct (ln_beta beta (F2R (Float beta m (-e)))) as (e', E').
+simpl.
+specialize (E' (Rgt_not_eq _ _ (F2R_gt_0_compat beta (Float beta m (-e)) Hm'))).
+apply ln_beta_unique.
+rewrite Rabs_pos_eq in E'.
+2: now apply F2R_ge_0_compat.
+rewrite Rabs_pos_eq.
+split.
+assert (H': (0 <= e' - 1)%Z).
+(* .. *)
+cut (1 <= e')%Z. omega.
+apply bpow_lt_bpow with beta.
+apply Rle_lt_trans with (2 := proj2 E').
+simpl.
+rewrite <- (Rinv_r (bpow e)).
+rewrite <- bpow_opp.
+unfold F2R. simpl.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+rewrite <- Z2R_Zpower with (1 := proj1 He).
+apply Z2R_le.
+rewrite <- Zabs_eq with (1 := Hm).
+now apply Zpower_le_Zdigits.
+apply Rgt_not_eq.
+apply bpow_gt_0.
+(* .. *)
+rewrite <- Z2R_Zpower with (1 := H').
+apply Z2R_le.
+apply Zfloor_lub.
+rewrite Z2R_Zpower with (1 := H').
+apply E'.
+apply Rle_lt_trans with (2 := proj2 E').
+apply Zfloor_lb.
+apply (Z2R_le 0).
+apply Zfloor_lub.
+now apply F2R_ge_0_compat.
+apply Zgt_not_eq.
+apply Zlt_le_trans with (beta^e / beta^e)%Z.
+rewrite Z_div_same_full.
+apply refl_equal.
+now apply Zgt_not_eq.
+apply Z_div_le.
+now apply Zlt_gt.
+rewrite <- Zabs_eq with (1 := Hm).
+now apply Zpower_le_Zdigits.
+(* . *)
+unfold Zminus.
+rewrite He', Zplus_opp_r.
+rewrite Zdiv_small.
+apply refl_equal.
+apply conj with (1 := Hm).
+pattern m at 1 ; rewrite <- Zabs_eq with (1 := Hm).
+apply Zpower_gt_Zdigits.
+apply Zle_refl.
+(* *)
+revert He.
+rewrite <- Hm'.
+intros (H1, H2).
+simpl.
+now rewrite (Zle_antisym _ _ H2 H1).
+Qed.
+
+End Fcalc_digits.
+
+Definition radix2 := Build_radix 2 (refl_equal _).
+
+Theorem Z_of_nat_S_digits2_Pnat :
+ forall m : positive,
+ Z_of_nat (S (digits2_Pnat m)) = Zdigits radix2 (Zpos m).
+Proof.
+intros m.
+rewrite Zdigits_ln_beta. 2: easy.
+apply sym_eq.
+apply ln_beta_unique.
+generalize (digits2_Pnat m) (digits2_Pnat_correct m).
+intros d H.
+simpl in H.
+replace (Z_of_nat (S d) - 1)%Z with (Z_of_nat d).
+rewrite <- Z2R_abs.
+rewrite <- 2!Z2R_Zpower_nat.
+split.
+now apply Z2R_le.
+now apply Z2R_lt.
+rewrite inj_S.
+apply Zpred_succ.
+Qed.
+
diff --git a/flocq/Calc/Fcalc_div.v b/flocq/Calc/Fcalc_div.v
new file mode 100644
index 0000000..6594e55
--- /dev/null
+++ b/flocq/Calc/Fcalc_div.v
@@ -0,0 +1,165 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Helper function and theorem for computing the rounded quotient of two floating-point numbers. *)
+
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_float_prop.
+Require Import Fcore_digits.
+Require Import Fcalc_bracket.
+Require Import Fcalc_digits.
+
+Section Fcalc_div.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+(** Computes a mantissa of precision p, the corresponding exponent,
+ and the position with respect to the real quotient of the
+ input floating-point numbers.
+
+The algorithm performs the following steps:
+- Shift the dividend mantissa so that it has at least p2 + p digits.
+- Perform the Euclidean division.
+- Compute the position according to the division remainder.
+
+Complexity is fine as long as p1 <= 2p and p2 <= p, where p1 and p2 are
+the digit counts of the two input mantissas.
+*)
+
+Definition Fdiv_core prec m1 e1 m2 e2 :=
+ let d1 := Zdigits beta m1 in
+ let d2 := Zdigits beta m2 in
+ let e := (e1 - e2)%Z in
+ let (m, e') :=
+ match (d2 + prec - d1)%Z with
+ | Zpos p => (m1 * Zpower_pos beta p, e + Zneg p)%Z
+ | _ => (m1, e)
+ end in
+ let '(q, r) := Zdiv_eucl m m2 in
+ (q, e', new_location m2 r loc_Exact).
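+
+(* Worked instance (radix 10, numbers chosen for illustration): with prec = 3,
+   m1 = 125, e1 = 0, m2 = 4, e2 = 0, we get d1 = 3 and d2 = 1, so the dividend
+   is shifted to m = 1250 with e' = -1.  Euclidean division gives q = 312 and
+   r = 2, i.e. exactly half of the m2 = 4 steps, hence loc_Inexact Eq:
+   125 / 4 = 31.25 lies halfway between 312*10^-1 and 313*10^-1. *)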
+
+Theorem Fdiv_core_correct :
+ forall prec m1 e1 m2 e2,
+ (0 < prec)%Z ->
+ (0 < m1)%Z -> (0 < m2)%Z ->
+ let '(m, e, l) := Fdiv_core prec m1 e1 m2 e2 in
+ (prec <= Zdigits beta m)%Z /\
+ inbetween_float beta m e (F2R (Float beta m1 e1) / F2R (Float beta m2 e2)) l.
+Proof.
+intros prec m1 e1 m2 e2 Hprec Hm1 Hm2.
+unfold Fdiv_core.
+set (d1 := Zdigits beta m1).
+set (d2 := Zdigits beta m2).
+case_eq
+ (match (d2 + prec - d1)%Z with
+ | Zpos p => ((m1 * Zpower_pos beta p)%Z, (e1 - e2 + Zneg p)%Z)
+ | _ => (m1, (e1 - e2)%Z)
+ end).
+intros m' e' Hme.
+(* . the shifted mantissa m' has enough digits *)
+assert (Hs: F2R (Float beta m' (e' + e2)) = F2R (Float beta m1 e1) /\ (0 < m')%Z /\ (d2 + prec <= Zdigits beta m')%Z).
+replace (d2 + prec)%Z with (d2 + prec - d1 + d1)%Z by ring.
+destruct (d2 + prec - d1)%Z as [|p|p] ;
+ unfold d1 ;
+ inversion Hme.
+ring_simplify (e1 - e2 + e2)%Z.
+repeat split.
+now rewrite <- H0.
+apply Zle_refl.
+replace (e1 - e2 + Zneg p + e2)%Z with (e1 - Zpos p)%Z by (unfold Zminus ; simpl ; ring).
+fold (Zpower beta (Zpos p)).
+split.
+pattern (Zpos p) at 1 ; replace (Zpos p) with (e1 - (e1 - Zpos p))%Z by ring.
+apply sym_eq.
+apply F2R_change_exp.
+assert (0 < Zpos p)%Z by easy.
+omega.
+split.
+apply Zmult_lt_0_compat.
+exact Hm1.
+now apply Zpower_gt_0.
+rewrite Zdigits_mult_Zpower.
+rewrite Zplus_comm.
+apply Zle_refl.
+apply sym_not_eq.
+now apply Zlt_not_eq.
+easy.
+split.
+now ring_simplify (e1 - e2 + e2)%Z.
+assert (Zneg p < 0)%Z by easy.
+omega.
+(* . *)
+destruct Hs as (Hs1, (Hs2, Hs3)).
+rewrite <- Hs1.
+generalize (Z_div_mod m' m2 (Zlt_gt _ _ Hm2)).
+destruct (Zdiv_eucl m' m2) as (q, r).
+intros (Hq, Hr).
+split.
+(* . the result mantissa q has enough digits *)
+cut (Zdigits beta m' <= d2 + Zdigits beta q)%Z. omega.
+unfold d2.
+rewrite Hq.
+assert (Hq': (0 < q)%Z).
+apply Zmult_lt_reg_r with (1 := Hm2).
+assert (m2 < m')%Z.
+apply lt_Zdigits with beta.
+now apply Zlt_le_weak.
+unfold d2 in Hs3.
+clear -Hprec Hs3 ; omega.
+cut (q * m2 = m' - r)%Z. clear -Hr H ; omega.
+rewrite Hq.
+ring.
+apply Zle_trans with (Zdigits beta (m2 + q + m2 * q)).
+apply Zdigits_le.
+rewrite <- Hq.
+now apply Zlt_le_weak.
+clear -Hr Hq'. omega.
+apply Zdigits_mult_strong ; apply Zlt_le_weak.
+now apply Zle_lt_trans with r.
+exact Hq'.
+(* . the location is correctly computed *)
+unfold inbetween_float, F2R. simpl.
+rewrite bpow_plus, Z2R_plus.
+rewrite Hq, Z2R_plus, Z2R_mult.
+replace ((Z2R m2 * Z2R q + Z2R r) * (bpow e' * bpow e2) / (Z2R m2 * bpow e2))%R
+ with ((Z2R q + Z2R r / Z2R m2) * bpow e')%R.
+apply inbetween_mult_compat.
+apply bpow_gt_0.
+destruct (Z_lt_le_dec 1 m2) as [Hm2''|Hm2''].
+replace (Z2R 1) with (Z2R m2 * /Z2R m2)%R.
+apply new_location_correct ; try easy.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0).
+now constructor.
+apply Rinv_r.
+apply Rgt_not_eq.
+now apply (Z2R_lt 0).
+assert (r = 0 /\ m2 = 1)%Z by (clear -Hr Hm2'' ; omega).
+rewrite (proj1 H), (proj2 H).
+unfold Rdiv.
+rewrite Rmult_0_l, Rplus_0_r.
+now constructor.
+field.
+split ; apply Rgt_not_eq.
+apply bpow_gt_0.
+now apply (Z2R_lt 0).
+Qed.
+
+End Fcalc_div.
diff --git a/flocq/Calc/Fcalc_ops.v b/flocq/Calc/Fcalc_ops.v
new file mode 100644
index 0000000..15bb211
--- /dev/null
+++ b/flocq/Calc/Fcalc_ops.v
@@ -0,0 +1,161 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** Basic operations on floats: alignment, opposite, absolute value, addition, subtraction, multiplication *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_float_prop.
+
+Section Float_ops.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Definition Falign (f1 f2 : float beta) :=
+ let '(Float m1 e1) := f1 in
+ let '(Float m2 e2) := f2 in
+ if Zle_bool e1 e2
+ then (m1, (m2 * Zpower beta (e2 - e1))%Z, e1)
+ else ((m1 * Zpower beta (e1 - e2))%Z, m2, e2).
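+
+(* Illustration (radix 10): Falign (Float beta 5 (-1)) (Float beta 3 1)
+   aligns both numbers on the smaller exponent, giving (5, 300, -1);
+   Fplus below then yields Float beta 305 (-1), i.e. 0.5 + 30 = 30.5. *)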
+
+Theorem Falign_spec :
+ forall f1 f2 : float beta,
+ let '(m1, m2, e) := Falign f1 f2 in
+ F2R f1 = F2R (Float beta m1 e) /\ F2R f2 = F2R (Float beta m2 e).
+Proof.
+unfold Falign.
+intros (m1, e1) (m2, e2).
+generalize (Zle_cases e1 e2).
+case (Zle_bool e1 e2) ; intros He ; split ; trivial.
+now rewrite <- F2R_change_exp.
+rewrite <- F2R_change_exp.
+apply refl_equal.
+omega.
+Qed.
+
+Theorem Falign_spec_exp:
+ forall f1 f2 : float beta,
+ snd (Falign f1 f2) = Zmin (Fexp f1) (Fexp f2).
+Proof.
+intros (m1,e1) (m2,e2).
+unfold Falign; simpl.
+generalize (Zle_cases e1 e2);case (Zle_bool e1 e2); intros He.
+case (Zmin_spec e1 e2); intros (H1,H2); easy.
+case (Zmin_spec e1 e2); intros (H1,H2); easy.
+Qed.
+
+Definition Fopp (f1: float beta) :=
+ let '(Float m1 e1) := f1 in
+ Float beta (-m1)%Z e1.
+
+Theorem F2R_opp :
+ forall f1 : float beta,
+ (F2R (Fopp f1) = -F2R f1)%R.
+intros (m1,e1).
+apply F2R_Zopp.
+Qed.
+
+Definition Fabs (f1: float beta) :=
+ let '(Float m1 e1) := f1 in
+ Float beta (Zabs m1)%Z e1.
+
+Theorem F2R_abs :
+ forall f1 : float beta,
+ (F2R (Fabs f1) = Rabs (F2R f1))%R.
+intros (m1,e1).
+apply F2R_Zabs.
+Qed.
+
+Definition Fplus (f1 f2 : float beta) :=
+ let '(m1, m2 ,e) := Falign f1 f2 in
+ Float beta (m1 + m2) e.
+
+Theorem F2R_plus :
+ forall f1 f2 : float beta,
+ F2R (Fplus f1 f2) = (F2R f1 + F2R f2)%R.
+Proof.
+intros f1 f2.
+unfold Fplus.
+generalize (Falign_spec f1 f2).
+destruct (Falign f1 f2) as ((m1, m2), e).
+intros (H1, H2).
+rewrite H1, H2.
+unfold F2R. simpl.
+rewrite Z2R_plus.
+apply Rmult_plus_distr_r.
+Qed.
+
+Theorem Fplus_same_exp :
+ forall m1 m2 e,
+ Fplus (Float beta m1 e) (Float beta m2 e) = Float beta (m1 + m2) e.
+Proof.
+intros m1 m2 e.
+unfold Fplus.
+simpl.
+now rewrite Zle_bool_refl, Zminus_diag, Zmult_1_r.
+Qed.
+
+Theorem Fexp_Fplus :
+ forall f1 f2 : float beta,
+ Fexp (Fplus f1 f2) = Zmin (Fexp f1) (Fexp f2).
+Proof.
+intros f1 f2.
+unfold Fplus.
+rewrite <- Falign_spec_exp.
+now destruct (Falign f1 f2) as ((p,q),e).
+Qed.
+
+Definition Fminus (f1 f2 : float beta) :=
+ Fplus f1 (Fopp f2).
+
+Theorem F2R_minus :
+ forall f1 f2 : float beta,
+ F2R (Fminus f1 f2) = (F2R f1 - F2R f2)%R.
+Proof.
+intros f1 f2; unfold Fminus.
+rewrite F2R_plus, F2R_opp.
+ring.
+Qed.
+
+Theorem Fminus_same_exp :
+ forall m1 m2 e,
+ Fminus (Float beta m1 e) (Float beta m2 e) = Float beta (m1 - m2) e.
+Proof.
+intros m1 m2 e.
+unfold Fminus.
+apply Fplus_same_exp.
+Qed.
+
+Definition Fmult (f1 f2 : float beta) :=
+ let '(Float m1 e1) := f1 in
+ let '(Float m2 e2) := f2 in
+ Float beta (m1 * m2) (e1 + e2).
+
+Theorem F2R_mult :
+ forall f1 f2 : float beta,
+ F2R (Fmult f1 f2) = (F2R f1 * F2R f2)%R.
+Proof.
+intros (m1, e1) (m2, e2).
+unfold Fmult, F2R. simpl.
+rewrite Z2R_mult, bpow_plus.
+ring.
+Qed.
+
+End Float_ops.
diff --git a/flocq/Calc/Fcalc_round.v b/flocq/Calc/Fcalc_round.v
new file mode 100644
index 0000000..3d31aea
--- /dev/null
+++ b/flocq/Calc/Fcalc_round.v
@@ -0,0 +1,1093 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Helper function for computing the rounded value of a real number. *)
+
+Require Import Fcore.
+Require Import Fcore_digits.
+Require Import Fcalc_bracket.
+Require Import Fcalc_digits.
+
+Section Fcalc_round.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Section Fcalc_round_fexp.
+
+Variable fexp : Z -> Z.
+Context { valid_exp : Valid_exp fexp }.
+Notation format := (generic_format beta fexp).
+
+(** Relates location and rounding. *)
+
+Theorem inbetween_float_round :
+ forall rnd choice,
+ ( forall x m l, inbetween_int m x l -> rnd x = choice m l ) ->
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e x l ->
+ round beta fexp rnd x = F2R (Float beta (choice m l) e).
+Proof.
+intros rnd choice Hc x m l e Hl.
+unfold round, F2R. simpl.
+apply (f_equal (fun m => (Z2R m * bpow e)%R)).
+apply Hc.
+apply inbetween_mult_reg with (bpow e).
+apply bpow_gt_0.
+now rewrite scaled_mantissa_mult_bpow.
+Qed.
+
+Definition cond_incr (b : bool) m := if b then (m + 1)%Z else m.
+
+Theorem inbetween_float_round_sign :
+ forall rnd choice,
+ ( forall x m l, inbetween_int m (Rabs x) l ->
+ rnd x = cond_Zopp (Rlt_bool x 0) (choice (Rlt_bool x 0) m l) ) ->
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e (Rabs x) l ->
+ round beta fexp rnd x = F2R (Float beta (cond_Zopp (Rlt_bool x 0) (choice (Rlt_bool x 0) m l)) e).
+Proof.
+intros rnd choice Hc x m l e Hx.
+apply (f_equal (fun m => (Z2R m * bpow e)%R)).
+simpl.
+replace (Rlt_bool x 0) with (Rlt_bool (scaled_mantissa beta fexp x) 0).
+(* *)
+apply Hc.
+apply inbetween_mult_reg with (bpow e).
+apply bpow_gt_0.
+rewrite <- (Rabs_right (bpow e)) at 3.
+rewrite <- Rabs_mult.
+now rewrite scaled_mantissa_mult_bpow.
+apply Rle_ge.
+apply bpow_ge_0.
+(* *)
+destruct (Rlt_bool_spec x 0) as [Zx|Zx] ; simpl.
+apply Rlt_bool_true.
+rewrite <- (Rmult_0_l (bpow (-e))).
+apply Rmult_lt_compat_r with (2 := Zx).
+apply bpow_gt_0.
+apply Rlt_bool_false.
+apply Rmult_le_pos with (1 := Zx).
+apply bpow_ge_0.
+Qed.
+
+(** Relates location and rounding down. *)
+
+Theorem inbetween_int_DN :
+ forall x m l,
+ inbetween_int m x l ->
+ Zfloor x = m.
+Proof.
+intros x m l Hl.
+refine (Zfloor_imp m _ _).
+apply inbetween_bounds with (2 := Hl).
+apply Z2R_lt.
+apply Zlt_succ.
+Qed.
+
+Theorem inbetween_float_DN :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e x l ->
+ round beta fexp Zfloor x = F2R (Float beta m e).
+Proof.
+apply inbetween_float_round with (choice := fun m l => m).
+exact inbetween_int_DN.
+Qed.
+
+Definition round_sign_DN s l :=
+ match l with
+ | loc_Exact => false
+ | _ => s
+ end.
+
+Theorem inbetween_int_DN_sign :
+ forall x m l,
+ inbetween_int m (Rabs x) l ->
+ Zfloor x = cond_Zopp (Rlt_bool x 0) (cond_incr (round_sign_DN (Rlt_bool x 0) l) m).
+Proof.
+intros x m l Hl.
+unfold Rabs in Hl.
+destruct (Rcase_abs x) as [Zx|Zx] .
+(* *)
+rewrite Rlt_bool_true with (1 := Zx).
+inversion_clear Hl ; simpl.
+rewrite <- (Ropp_involutive x).
+rewrite H, <- Z2R_opp.
+apply Zfloor_Z2R.
+apply Zfloor_imp.
+split.
+apply Rlt_le.
+rewrite Z2R_opp.
+apply Ropp_lt_cancel.
+now rewrite Ropp_involutive.
+ring_simplify (- (m + 1) + 1)%Z.
+rewrite Z2R_opp.
+apply Ropp_lt_cancel.
+now rewrite Ropp_involutive.
+(* *)
+rewrite Rlt_bool_false.
+inversion_clear Hl ; simpl.
+rewrite H.
+apply Zfloor_Z2R.
+apply Zfloor_imp.
+split.
+now apply Rlt_le.
+apply H.
+now apply Rge_le.
+Qed.
+
+Theorem inbetween_float_DN_sign :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e (Rabs x) l ->
+ round beta fexp Zfloor x = F2R (Float beta (cond_Zopp (Rlt_bool x 0) (cond_incr (round_sign_DN (Rlt_bool x 0) l) m)) e).
+Proof.
+apply inbetween_float_round_sign with (choice := fun s m l => cond_incr (round_sign_DN s l) m).
+exact inbetween_int_DN_sign.
+Qed.
+
+(** Relates location and rounding up. *)
+
+Definition round_UP l :=
+ match l with
+ | loc_Exact => false
+ | _ => true
+ end.
+
+Theorem inbetween_int_UP :
+ forall x m l,
+ inbetween_int m x l ->
+ Zceil x = cond_incr (round_UP l) m.
+Proof.
+intros x m l Hl.
+assert (Hl': l = loc_Exact \/ (l <> loc_Exact /\ round_UP l = true)).
+case l ; try (now left) ; now right ; split.
+destruct Hl' as [Hl'|(Hl1, Hl2)].
+(* Exact *)
+rewrite Hl'.
+destruct Hl ; try easy.
+rewrite H.
+exact (Zceil_Z2R _).
+(* not Exact *)
+rewrite Hl2.
+simpl.
+apply Zceil_imp.
+ring_simplify (m + 1 - 1)%Z.
+refine (let H := _ in conj (proj1 H) (Rlt_le _ _ (proj2 H))).
+apply inbetween_bounds_not_Eq with (2 := Hl1) (1 := Hl).
+Qed.
+
+Theorem inbetween_float_UP :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e x l ->
+ round beta fexp Zceil x = F2R (Float beta (cond_incr (round_UP l) m) e).
+Proof.
+apply inbetween_float_round with (choice := fun m l => cond_incr (round_UP l) m).
+exact inbetween_int_UP.
+Qed.
+
+Definition round_sign_UP s l :=
+ match l with
+ | loc_Exact => false
+ | _ => negb s
+ end.
+
+Theorem inbetween_int_UP_sign :
+ forall x m l,
+ inbetween_int m (Rabs x) l ->
+ Zceil x = cond_Zopp (Rlt_bool x 0) (cond_incr (round_sign_UP (Rlt_bool x 0) l) m).
+Proof.
+intros x m l Hl.
+unfold Rabs in Hl.
+destruct (Rcase_abs x) as [Zx|Zx] .
+(* *)
+rewrite Rlt_bool_true with (1 := Zx).
+simpl.
+unfold Zceil.
+apply f_equal.
+inversion_clear Hl ; simpl.
+rewrite H.
+apply Zfloor_Z2R.
+apply Zfloor_imp.
+split.
+now apply Rlt_le.
+apply H.
+(* *)
+rewrite Rlt_bool_false.
+simpl.
+inversion_clear Hl ; simpl.
+rewrite H.
+apply Zceil_Z2R.
+apply Zceil_imp.
+split.
+change (m + 1 - 1)%Z with (Zpred (Zsucc m)).
+now rewrite <- Zpred_succ.
+now apply Rlt_le.
+now apply Rge_le.
+Qed.
+
+Theorem inbetween_float_UP_sign :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e (Rabs x) l ->
+ round beta fexp Zceil x = F2R (Float beta (cond_Zopp (Rlt_bool x 0) (cond_incr (round_sign_UP (Rlt_bool x 0) l) m)) e).
+Proof.
+apply inbetween_float_round_sign with (choice := fun s m l => cond_incr (round_sign_UP s l) m).
+exact inbetween_int_UP_sign.
+Qed.
+
+(** Relates location and rounding toward zero. *)
+
+Definition round_ZR (s : bool) l :=
+ match l with
+ | loc_Exact => false
+ | _ => s
+ end.
+
+Theorem inbetween_int_ZR :
+ forall x m l,
+ inbetween_int m x l ->
+ Ztrunc x = cond_incr (round_ZR (Zlt_bool m 0) l) m.
+Proof with auto with typeclass_instances.
+intros x m l Hl.
+inversion_clear Hl as [Hx|l' Hx Hl'].
+(* Exact *)
+rewrite Hx.
+rewrite Zrnd_Z2R...
+(* not Exact *)
+unfold Ztrunc.
+assert (Hm: Zfloor x = m).
+apply Zfloor_imp.
+exact (conj (Rlt_le _ _ (proj1 Hx)) (proj2 Hx)).
+rewrite Zceil_floor_neq.
+rewrite Hm.
+unfold cond_incr.
+simpl.
+case Rlt_bool_spec ; intros Hx' ;
+ case Zlt_bool_spec ; intros Hm' ; try apply refl_equal.
+elim Rlt_not_le with (1 := Hx').
+apply Rlt_le.
+apply Rle_lt_trans with (2 := proj1 Hx).
+now apply (Z2R_le 0).
+elim Rle_not_lt with (1 := Hx').
+apply Rlt_le_trans with (1 := proj2 Hx).
+apply (Z2R_le _ 0).
+now apply Zlt_le_succ.
+rewrite Hm.
+now apply Rlt_not_eq.
+Qed.
+
+Theorem inbetween_float_ZR :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e x l ->
+ round beta fexp Ztrunc x = F2R (Float beta (cond_incr (round_ZR (Zlt_bool m 0) l) m) e).
+Proof.
+apply inbetween_float_round with (choice := fun m l => cond_incr (round_ZR (Zlt_bool m 0) l) m).
+exact inbetween_int_ZR.
+Qed.
+
+Theorem inbetween_int_ZR_sign :
+ forall x m l,
+ inbetween_int m (Rabs x) l ->
+ Ztrunc x = cond_Zopp (Rlt_bool x 0) m.
+Proof.
+intros x m l Hl.
+simpl.
+unfold Ztrunc.
+destruct (Rlt_le_dec x 0) as [Zx|Zx].
+(* *)
+rewrite Rlt_bool_true with (1 := Zx).
+simpl.
+unfold Zceil.
+apply f_equal.
+apply Zfloor_imp.
+rewrite <- Rabs_left with (1 := Zx).
+apply inbetween_bounds with (2 := Hl).
+apply Z2R_lt.
+apply Zlt_succ.
+(* *)
+rewrite Rlt_bool_false with (1 := Zx).
+simpl.
+apply Zfloor_imp.
+rewrite <- Rabs_pos_eq with (1 := Zx).
+apply inbetween_bounds with (2 := Hl).
+apply Z2R_lt.
+apply Zlt_succ.
+Qed.
+
+Theorem inbetween_float_ZR_sign :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e (Rabs x) l ->
+ round beta fexp Ztrunc x = F2R (Float beta (cond_Zopp (Rlt_bool x 0) m) e).
+Proof.
+apply inbetween_float_round_sign with (choice := fun s m l => m).
+exact inbetween_int_ZR_sign.
+Qed.
+
+(** Relates location and rounding to nearest. *)
+
+Definition round_N (p : bool) l :=
+ match l with
+ | loc_Exact => false
+ | loc_Inexact Lt => false
+ | loc_Inexact Eq => p
+ | loc_Inexact Gt => true
+ end.
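+
+(* Concretely: for x strictly between m and m+1, loc_Inexact Lt (below the
+   midpoint) keeps m, loc_Inexact Gt selects m+1, and the tie loc_Inexact Eq
+   is decided by p.  For instance x = 2.5 gives m = 2 with loc_Inexact Eq,
+   so rounding to nearest even (p = negb (Zeven 2) = false) keeps 2. *)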
+
+Theorem inbetween_int_N :
+ forall choice x m l,
+ inbetween_int m x l ->
+ Znearest choice x = cond_incr (round_N (choice m) l) m.
+Proof with auto with typeclass_instances.
+intros choice x m l Hl.
+inversion_clear Hl as [Hx|l' Hx Hl'].
+(* Exact *)
+rewrite Hx.
+rewrite Zrnd_Z2R...
+(* not Exact *)
+unfold Znearest.
+assert (Hm: Zfloor x = m).
+apply Zfloor_imp.
+exact (conj (Rlt_le _ _ (proj1 Hx)) (proj2 Hx)).
+rewrite Zceil_floor_neq.
+rewrite Hm.
+replace (Rcompare (x - Z2R m) (/2)) with l'.
+now case l'.
+rewrite <- Hl'.
+rewrite Z2R_plus.
+rewrite <- (Rcompare_plus_r (- Z2R m) x).
+apply f_equal.
+simpl (Z2R 1).
+field.
+rewrite Hm.
+now apply Rlt_not_eq.
+Qed.
+
+Theorem inbetween_int_N_sign :
+ forall choice x m l,
+ inbetween_int m (Rabs x) l ->
+ Znearest choice x = cond_Zopp (Rlt_bool x 0) (cond_incr (round_N (if Rlt_bool x 0 then negb (choice (-(m + 1))%Z) else choice m) l) m).
+Proof with auto with typeclass_instances.
+intros choice x m l Hl.
+simpl.
+unfold Rabs in Hl.
+destruct (Rcase_abs x) as [Zx|Zx].
+(* *)
+rewrite Rlt_bool_true with (1 := Zx).
+simpl.
+rewrite <- (Ropp_involutive x).
+rewrite Znearest_opp.
+apply f_equal.
+inversion_clear Hl as [Hx|l' Hx Hl'].
+rewrite Hx.
+apply Zrnd_Z2R...
+assert (Hm: Zfloor (-x) = m).
+apply Zfloor_imp.
+exact (conj (Rlt_le _ _ (proj1 Hx)) (proj2 Hx)).
+unfold Znearest.
+rewrite Zceil_floor_neq.
+rewrite Hm.
+replace (Rcompare (- x - Z2R m) (/2)) with l'.
+now case l'.
+rewrite <- Hl'.
+rewrite Z2R_plus.
+rewrite <- (Rcompare_plus_r (- Z2R m) (-x)).
+apply f_equal.
+simpl (Z2R 1).
+field.
+rewrite Hm.
+now apply Rlt_not_eq.
+(* *)
+generalize (Rge_le _ _ Zx).
+clear Zx. intros Zx.
+rewrite Rlt_bool_false with (1 := Zx).
+simpl.
+inversion_clear Hl as [Hx|l' Hx Hl'].
+rewrite Hx.
+apply Zrnd_Z2R...
+assert (Hm: Zfloor x = m).
+apply Zfloor_imp.
+exact (conj (Rlt_le _ _ (proj1 Hx)) (proj2 Hx)).
+unfold Znearest.
+rewrite Zceil_floor_neq.
+rewrite Hm.
+replace (Rcompare (x - Z2R m) (/2)) with l'.
+now case l'.
+rewrite <- Hl'.
+rewrite Z2R_plus.
+rewrite <- (Rcompare_plus_r (- Z2R m) x).
+apply f_equal.
+simpl (Z2R 1).
+field.
+rewrite Hm.
+now apply Rlt_not_eq.
+Qed.
+
+(** Relates location and rounding to nearest even. *)
+
+Theorem inbetween_int_NE :
+ forall x m l,
+ inbetween_int m x l ->
+ ZnearestE x = cond_incr (round_N (negb (Zeven m)) l) m.
+Proof.
+intros x m l Hl.
+now apply inbetween_int_N with (choice := fun x => negb (Zeven x)).
+Qed.
+
+Theorem inbetween_float_NE :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e x l ->
+ round beta fexp ZnearestE x = F2R (Float beta (cond_incr (round_N (negb (Zeven m)) l) m) e).
+Proof.
+apply inbetween_float_round with (choice := fun m l => cond_incr (round_N (negb (Zeven m)) l) m).
+exact inbetween_int_NE.
+Qed.
+
+Theorem inbetween_int_NE_sign :
+ forall x m l,
+ inbetween_int m (Rabs x) l ->
+ ZnearestE x = cond_Zopp (Rlt_bool x 0) (cond_incr (round_N (negb (Zeven m)) l) m).
+Proof.
+intros x m l Hl.
+erewrite inbetween_int_N_sign with (choice := fun x => negb (Zeven x)).
+2: eexact Hl.
+apply f_equal.
+case Rlt_bool.
+rewrite Zeven_opp, Zeven_plus.
+now case (Zeven m).
+apply refl_equal.
+Qed.
+
+Theorem inbetween_float_NE_sign :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e (Rabs x) l ->
+ round beta fexp ZnearestE x = F2R (Float beta (cond_Zopp (Rlt_bool x 0) (cond_incr (round_N (negb (Zeven m)) l) m)) e).
+Proof.
+apply inbetween_float_round_sign with (choice := fun s m l => cond_incr (round_N (negb (Zeven m)) l) m).
+exact inbetween_int_NE_sign.
+Qed.
+
+(** Relates location and rounding to nearest away. *)
+
+Theorem inbetween_int_NA :
+ forall x m l,
+ inbetween_int m x l ->
+ ZnearestA x = cond_incr (round_N (Zle_bool 0 m) l) m.
+Proof.
+intros x m l Hl.
+now apply inbetween_int_N with (choice := fun x => Zle_bool 0 x).
+Qed.
+
+Theorem inbetween_float_NA :
+ forall x m l,
+ let e := canonic_exp beta fexp x in
+ inbetween_float beta m e x l ->
+ round beta fexp ZnearestA x = F2R (Float beta (cond_incr (round_N (Zle_bool 0 m) l) m) e).
+Proof.
+apply inbetween_float_round with (choice := fun m l => cond_incr (round_N (Zle_bool 0 m) l) m).
+exact inbetween_int_NA.
+Qed.
+
+Theorem inbetween_int_NA_sign :
+ forall x m l,
+ inbetween_int m (Rabs x) l ->
+ ZnearestA x = cond_Zopp (Rlt_bool x 0) (cond_incr (round_N true l) m).
+Proof.
+intros x m l Hl.
+erewrite inbetween_int_N_sign with (choice := Zle_bool 0).
+2: eexact Hl.
+apply f_equal.
+assert (Hm: (0 <= m)%Z).
+apply Zlt_succ_le.
+apply lt_Z2R.
+apply Rle_lt_trans with (Rabs x).
+apply Rabs_pos.
+refine (proj2 (inbetween_bounds _ _ _ _ _ Hl)).
+apply Z2R_lt.
+apply Zlt_succ.
+rewrite Zle_bool_true with (1 := Hm).
+rewrite Zle_bool_false.
+now case Rlt_bool.
+omega.
+Qed.
+
+Definition truncate_aux t k :=
+ let '(m, e, l) := t in
+ let p := Zpower beta k in
+ (Zdiv m p, (e + k)%Z, new_location p (Zmod m p) l).
+
+Theorem truncate_aux_comp :
+ forall t k1 k2,
+ (0 < k1)%Z ->
+ (0 < k2)%Z ->
+ truncate_aux t (k1 + k2) = truncate_aux (truncate_aux t k1) k2.
+Proof.
+intros ((m,e),l) k1 k2 Hk1 Hk2.
+unfold truncate_aux.
+destruct (inbetween_float_ex beta m e l) as (x,Hx).
+assert (B1 := inbetween_float_new_location _ _ _ _ _ _ Hk1 Hx).
+assert (Hk3: (0 < k1 + k2)%Z).
+change Z0 with (0 + 0)%Z.
+now apply Zplus_lt_compat.
+assert (B3 := inbetween_float_new_location _ _ _ _ _ _ Hk3 Hx).
+assert (B2 := inbetween_float_new_location _ _ _ _ _ _ Hk2 B1).
+rewrite Zplus_assoc in B3.
+destruct (inbetween_float_unique _ _ _ _ _ _ _ B2 B3).
+now rewrite H, H0, Zplus_assoc.
+Qed.
+
+(** Given a triple (mantissa, exponent, position), this function
+ computes a triple with a canonic exponent, assuming the
+ original triple had enough precision. *)
+
+Definition truncate t :=
+ let '(m, e, l) := t in
+ let k := (fexp (Zdigits beta m + e) - e)%Z in
+ if Zlt_bool 0 k then truncate_aux t k
+ else t.
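+
+(* Worked instance (radix 10, taking fexp := FIX_exp (-2) purely as an example):
+   truncate (12345, -4, loc_Exact) computes k = -2 - (-4) = 2 and returns
+   (123, -2, loc_Inexact Lt): the value 1.2345 keeps mantissa 123 at the
+   canonic exponent -2, and the dropped digits 45 place it below the
+   midpoint of [123*10^-2, 124*10^-2). *)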
+
+Theorem truncate_0 :
+ forall e l,
+ let '(m', e', l') := truncate (0, e, l)%Z in
+ m' = Z0.
+Proof.
+intros e l.
+unfold truncate.
+case Zlt_bool.
+simpl.
+apply Zdiv_0_l.
+apply refl_equal.
+Qed.
+
+Theorem generic_format_truncate :
+ forall m e l,
+ (0 <= m)%Z ->
+ let '(m', e', l') := truncate (m, e, l) in
+ format (F2R (Float beta m' e')).
+Proof.
+intros m e l Hm.
+unfold truncate.
+set (k := (fexp (Zdigits beta m + e) - e)%Z).
+case Zlt_bool_spec ; intros Hk.
+(* *)
+unfold truncate_aux.
+apply generic_format_F2R.
+intros Hd.
+unfold canonic_exp.
+rewrite ln_beta_F2R_Zdigits with (1 := Hd).
+rewrite Zdigits_div_Zpower with (1 := Hm).
+replace (Zdigits beta m - k + (e + k))%Z with (Zdigits beta m + e)%Z by ring.
+unfold k.
+ring_simplify.
+apply Zle_refl.
+split.
+now apply Zlt_le_weak.
+apply Znot_gt_le.
+contradict Hd.
+apply Zdiv_small.
+apply conj with (1 := Hm).
+rewrite <- Zabs_eq with (1 := Hm).
+apply Zpower_gt_Zdigits.
+apply Zlt_le_weak.
+now apply Zgt_lt.
+(* *)
+destruct (Zle_lt_or_eq _ _ Hm) as [Hm'|Hm'].
+apply generic_format_F2R.
+unfold canonic_exp.
+rewrite ln_beta_F2R_Zdigits.
+2: now apply Zgt_not_eq.
+unfold k in Hk. clear -Hk.
+omega.
+rewrite <- Hm', F2R_0.
+apply generic_format_0.
+Qed.
+
+Theorem truncate_correct_format :
+ forall m e,
+ m <> Z0 ->
+ let x := F2R (Float beta m e) in
+ generic_format beta fexp x ->
+ (e <= fexp (Zdigits beta m + e))%Z ->
+ let '(m', e', l') := truncate (m, e, loc_Exact) in
+ x = F2R (Float beta m' e') /\ e' = canonic_exp beta fexp x.
+Proof.
+intros m e Hm x Fx He.
+assert (Hc: canonic_exp beta fexp x = fexp (Zdigits beta m + e)).
+unfold canonic_exp, x.
+now rewrite ln_beta_F2R_Zdigits.
+unfold truncate.
+rewrite <- Hc.
+set (k := (canonic_exp beta fexp x - e)%Z).
+case Zlt_bool_spec ; intros Hk.
+(* *)
+unfold truncate_aux.
+rewrite Fx at 1.
+refine (let H := _ in conj _ H).
+unfold k. ring.
+rewrite <- H.
+apply F2R_eq_compat.
+replace (scaled_mantissa beta fexp x) with (Z2R (Zfloor (scaled_mantissa beta fexp x))).
+rewrite Ztrunc_Z2R.
+unfold scaled_mantissa.
+rewrite <- H.
+unfold x, F2R. simpl.
+rewrite Rmult_assoc, <- bpow_plus.
+ring_simplify (e + - (e + k))%Z.
+clear -Hm Hk.
+destruct k as [|k|k] ; try easy.
+simpl.
+apply Zfloor_div.
+intros H.
+generalize (Zpower_pos_gt_0 beta k) (Zle_bool_imp_le _ _ (radix_prop beta)).
+omega.
+rewrite scaled_mantissa_generic with (1 := Fx).
+now rewrite Zfloor_Z2R.
+(* *)
+split.
+apply refl_equal.
+unfold k in Hk.
+omega.
+Qed.
+
+Theorem truncate_correct_partial :
+ forall x m e l,
+ (0 < x)%R ->
+ inbetween_float beta m e x l ->
+ (e <= fexp (Zdigits beta m + e))%Z ->
+ let '(m', e', l') := truncate (m, e, l) in
+ inbetween_float beta m' e' x l' /\ e' = canonic_exp beta fexp x.
+Proof.
+intros x m e l Hx H1 H2.
+unfold truncate.
+set (k := (fexp (Zdigits beta m + e) - e)%Z).
+set (p := Zpower beta k).
+(* *)
+assert (Hx': (F2R (Float beta m e) <= x < F2R (Float beta (m + 1) e))%R).
+apply inbetween_float_bounds with (1 := H1).
+(* *)
+assert (Hm: (0 <= m)%Z).
+cut (0 < m + 1)%Z. omega.
+apply F2R_lt_reg with beta e.
+rewrite F2R_0.
+apply Rlt_trans with (1 := Hx).
+apply Hx'.
+assert (He: (e + k = canonic_exp beta fexp x)%Z).
+(* . *)
+unfold canonic_exp.
+destruct (Zle_lt_or_eq _ _ Hm) as [Hm'|Hm'].
+(* .. 0 < m *)
+rewrite ln_beta_F2R_bounds with (1 := Hm') (2 := Hx').
+assert (H: m <> Z0).
+apply sym_not_eq.
+now apply Zlt_not_eq.
+rewrite ln_beta_F2R with (1 := H).
+rewrite <- Zdigits_ln_beta with (1 := H).
+unfold k.
+ring.
+(* .. m = 0 *)
+rewrite <- Hm' in H2.
+destruct (ln_beta beta x) as (ex, Hex).
+simpl.
+specialize (Hex (Rgt_not_eq _ _ Hx)).
+unfold k.
+ring_simplify.
+rewrite <- Hm'.
+simpl.
+apply sym_eq.
+apply valid_exp.
+exact H2.
+apply Zle_trans with e.
+eapply bpow_lt_bpow.
+apply Rle_lt_trans with (1 := proj1 Hex).
+rewrite Rabs_pos_eq.
+rewrite <- F2R_bpow.
+rewrite <- Hm' in Hx'.
+apply Hx'.
+now apply Rlt_le.
+exact H2.
+(* . *)
+generalize (Zlt_cases 0 k).
+case (Zlt_bool 0 k) ; intros Hk ; unfold k in Hk.
+split.
+now apply inbetween_float_new_location.
+exact He.
+split.
+exact H1.
+rewrite <- He.
+unfold k.
+omega.
+Qed.
+
+Theorem truncate_correct :
+ forall x m e l,
+ (0 <= x)%R ->
+ inbetween_float beta m e x l ->
+ (e <= fexp (Zdigits beta m + e))%Z \/ l = loc_Exact ->
+ let '(m', e', l') := truncate (m, e, l) in
+ inbetween_float beta m' e' x l' /\
+ (e' = canonic_exp beta fexp x \/ (l' = loc_Exact /\ format x)).
+Proof.
+intros x m e l [Hx|Hx] H1 H2.
+(* 0 < x *)
+destruct (Zle_or_lt e (fexp (Zdigits beta m + e))) as [H3|H3].
+(* . enough digits *)
+generalize (truncate_correct_partial x m e l Hx H1 H3).
+destruct (truncate (m, e, l)) as ((m', e'), l').
+intros (H4, H5).
+split.
+exact H4.
+now left.
+(* . not enough digits but loc_Exact *)
+destruct H2 as [H2|H2].
+elim (Zlt_irrefl e).
+now apply Zle_lt_trans with (1 := H2).
+rewrite H2 in H1 |- *.
+unfold truncate.
+generalize (Zlt_cases 0 (fexp (Zdigits beta m + e) - e)).
+case Zlt_bool.
+intros H.
+apply False_ind.
+omega.
+intros _.
+split.
+exact H1.
+right.
+split.
+apply refl_equal.
+inversion_clear H1.
+rewrite H.
+apply generic_format_F2R.
+intros Zm.
+unfold canonic_exp.
+rewrite ln_beta_F2R_Zdigits with (1 := Zm).
+now apply Zlt_le_weak.
+(* x = 0 *)
+assert (Hm: m = Z0).
+cut (m <= 0 < m + 1)%Z. omega.
+assert (Hx': (F2R (Float beta m e) <= x < F2R (Float beta (m + 1) e))%R).
+apply inbetween_float_bounds with (1 := H1).
+rewrite <- Hx in Hx'.
+split.
+apply F2R_le_0_reg with (1 := proj1 Hx').
+apply F2R_gt_0_reg with (1 := proj2 Hx').
+rewrite Hm, <- Hx in H1 |- *.
+clear -H1.
+case H1.
+(* . *)
+intros _.
+assert (exists e', truncate (Z0, e, loc_Exact) = (Z0, e', loc_Exact)).
+unfold truncate, truncate_aux.
+case Zlt_bool.
+rewrite Zdiv_0_l, Zmod_0_l.
+eexists.
+apply f_equal.
+unfold new_location.
+now case Zeven.
+now eexists.
+destruct H as (e', H).
+rewrite H.
+split.
+constructor.
+apply sym_eq.
+apply F2R_0.
+right.
+repeat split.
+apply generic_format_0.
+(* . *)
+intros l' (H, _) _.
+rewrite F2R_0 in H.
+elim Rlt_irrefl with (1 := H).
+Qed.
+
+Section round_dir.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Variable choice : Z -> location -> Z.
+Hypothesis inbetween_int_valid :
+ forall x m l,
+ inbetween_int m x l ->
+ rnd x = choice m l.
+
+Theorem round_any_correct :
+ forall x m e l,
+ inbetween_float beta m e x l ->
+ (e = canonic_exp beta fexp x \/ (l = loc_Exact /\ format x)) ->
+ round beta fexp rnd x = F2R (Float beta (choice m l) e).
+Proof with auto with typeclass_instances.
+intros x m e l Hin [He|(Hl,Hf)].
+rewrite He in Hin |- *.
+apply inbetween_float_round with (2 := Hin).
+exact inbetween_int_valid.
+rewrite Hl in Hin.
+inversion_clear Hin.
+rewrite Hl.
+replace (choice m loc_Exact) with m.
+rewrite <- H.
+apply round_generic...
+rewrite <- (Zrnd_Z2R rnd m) at 1.
+apply inbetween_int_valid.
+now constructor.
+Qed.
+
+(** Truncating a triple is sufficient to round a real number. *)
+
+Theorem round_trunc_any_correct :
+ forall x m e l,
+ (0 <= x)%R ->
+ inbetween_float beta m e x l ->
+ (e <= fexp (Zdigits beta m + e))%Z \/ l = loc_Exact ->
+ round beta fexp rnd x = let '(m', e', l') := truncate (m, e, l) in F2R (Float beta (choice m' l') e').
+Proof.
+intros x m e l Hx Hl He.
+generalize (truncate_correct x m e l Hx Hl He).
+destruct (truncate (m, e, l)) as ((m', e'), l').
+intros (H1, H2).
+now apply round_any_correct.
+Qed.
+
+End round_dir.
+
+Section round_dir_sign.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Variable choice : bool -> Z -> location -> Z.
+Hypothesis inbetween_int_valid :
+ forall x m l,
+ inbetween_int m (Rabs x) l ->
+ rnd x = cond_Zopp (Rlt_bool x 0) (choice (Rlt_bool x 0) m l).
+
+Theorem round_sign_any_correct :
+ forall x m e l,
+ inbetween_float beta m e (Rabs x) l ->
+ (e = canonic_exp beta fexp x \/ (l = loc_Exact /\ format x)) ->
+ round beta fexp rnd x = F2R (Float beta (cond_Zopp (Rlt_bool x 0) (choice (Rlt_bool x 0) m l)) e).
+Proof with auto with typeclass_instances.
+intros x m e l Hin [He|(Hl,Hf)].
+rewrite He in Hin |- *.
+apply inbetween_float_round_sign with (2 := Hin).
+exact inbetween_int_valid.
+rewrite Hl in Hin.
+inversion_clear Hin.
+rewrite Hl.
+replace (choice (Rlt_bool x 0) m loc_Exact) with m.
+(* *)
+unfold Rabs in H.
+destruct (Rcase_abs x) as [Zx|Zx].
+rewrite Rlt_bool_true with (1 := Zx).
+simpl.
+rewrite F2R_Zopp.
+rewrite <- H, Ropp_involutive.
+apply round_generic...
+rewrite Rlt_bool_false.
+simpl.
+rewrite <- H.
+now apply round_generic.
+now apply Rge_le.
+(* *)
+destruct (Rlt_bool_spec x 0) as [Zx|Zx].
+(* . *)
+apply Zopp_inj.
+change (- m = cond_Zopp true (choice true m loc_Exact))%Z.
+rewrite <- (Zrnd_Z2R rnd (-m)) at 1.
+assert (Z2R (-m) < 0)%R.
+rewrite Z2R_opp.
+apply Ropp_lt_gt_0_contravar.
+apply (Z2R_lt 0).
+apply F2R_gt_0_reg with beta e.
+rewrite <- H.
+apply Rabs_pos_lt.
+now apply Rlt_not_eq.
+rewrite <- Rlt_bool_true with (1 := H0).
+apply inbetween_int_valid.
+constructor.
+rewrite Rabs_left with (1 := H0).
+rewrite Z2R_opp.
+apply Ropp_involutive.
+(* . *)
+change (m = cond_Zopp false (choice false m loc_Exact))%Z.
+rewrite <- (Zrnd_Z2R rnd m) at 1.
+assert (0 <= Z2R m)%R.
+apply (Z2R_le 0).
+apply F2R_ge_0_reg with beta e.
+rewrite <- H.
+apply Rabs_pos.
+rewrite <- Rlt_bool_false with (1 := H0).
+apply inbetween_int_valid.
+constructor.
+now apply Rabs_pos_eq.
+Qed.
+
+(** Truncating a triple is sufficient to round a real number. *)
+
+Theorem round_trunc_sign_any_correct :
+ forall x m e l,
+ inbetween_float beta m e (Rabs x) l ->
+ (e <= fexp (Zdigits beta m + e))%Z \/ l = loc_Exact ->
+ round beta fexp rnd x = let '(m', e', l') := truncate (m, e, l) in F2R (Float beta (cond_Zopp (Rlt_bool x 0) (choice (Rlt_bool x 0) m' l')) e').
+Proof.
+intros x m e l Hl He.
+generalize (truncate_correct (Rabs x) m e l (Rabs_pos _) Hl He).
+destruct (truncate (m, e, l)) as ((m', e'), l').
+intros (H1, H2).
+apply round_sign_any_correct.
+exact H1.
+destruct H2 as [H2|(H2,H3)].
+left.
+now rewrite <- canonic_exp_abs.
+right.
+split.
+exact H2.
+unfold Rabs in H3.
+destruct (Rcase_abs x) in H3.
+rewrite <- Ropp_involutive.
+now apply generic_format_opp.
+exact H3.
+Qed.
+
+End round_dir_sign.
+
+(** * Instances of the theorems above, for the usual rounding modes. *)
+
+Definition round_DN_correct :=
+ round_any_correct _ (fun m _ => m) inbetween_int_DN.
+
+Definition round_trunc_DN_correct :=
+ round_trunc_any_correct _ (fun m _ => m) inbetween_int_DN.
+
+Definition round_sign_DN_correct :=
+ round_sign_any_correct _ (fun s m l => cond_incr (round_sign_DN s l) m) inbetween_int_DN_sign.
+
+Definition round_trunc_sign_DN_correct :=
+ round_trunc_sign_any_correct _ (fun s m l => cond_incr (round_sign_DN s l) m) inbetween_int_DN_sign.
+
+Definition round_UP_correct :=
+ round_any_correct _ (fun m l => cond_incr (round_UP l) m) inbetween_int_UP.
+
+Definition round_trunc_UP_correct :=
+ round_trunc_any_correct _ (fun m l => cond_incr (round_UP l) m) inbetween_int_UP.
+
+Definition round_sign_UP_correct :=
+ round_sign_any_correct _ (fun s m l => cond_incr (round_sign_UP s l) m) inbetween_int_UP_sign.
+
+Definition round_trunc_sign_UP_correct :=
+ round_trunc_sign_any_correct _ (fun s m l => cond_incr (round_sign_UP s l) m) inbetween_int_UP_sign.
+
+Definition round_ZR_correct :=
+ round_any_correct _ (fun m l => cond_incr (round_ZR (Zlt_bool m 0) l) m) inbetween_int_ZR.
+
+Definition round_trunc_ZR_correct :=
+ round_trunc_any_correct _ (fun m l => cond_incr (round_ZR (Zlt_bool m 0) l) m) inbetween_int_ZR.
+
+Definition round_sign_ZR_correct :=
+ round_sign_any_correct _ (fun s m l => m) inbetween_int_ZR_sign.
+
+Definition round_trunc_sign_ZR_correct :=
+ round_trunc_sign_any_correct _ (fun s m l => m) inbetween_int_ZR_sign.
+
+Definition round_NE_correct :=
+ round_any_correct _ (fun m l => cond_incr (round_N (negb (Zeven m)) l) m) inbetween_int_NE.
+
+Definition round_trunc_NE_correct :=
+ round_trunc_any_correct _ (fun m l => cond_incr (round_N (negb (Zeven m)) l) m) inbetween_int_NE.
+
+Definition round_sign_NE_correct :=
+ round_sign_any_correct _ (fun s m l => cond_incr (round_N (negb (Zeven m)) l) m) inbetween_int_NE_sign.
+
+Definition round_trunc_sign_NE_correct :=
+ round_trunc_sign_any_correct _ (fun s m l => cond_incr (round_N (negb (Zeven m)) l) m) inbetween_int_NE_sign.
+
+Definition round_NA_correct :=
+ round_any_correct _ (fun m l => cond_incr (round_N (Zle_bool 0 m) l) m) inbetween_int_NA.
+
+Definition round_trunc_NA_correct :=
+ round_trunc_any_correct _ (fun m l => cond_incr (round_N (Zle_bool 0 m) l) m) inbetween_int_NA.
+
+Definition round_sign_NA_correct :=
+ round_sign_any_correct _ (fun s m l => cond_incr (round_N true l) m) inbetween_int_NA_sign.
+
+Definition round_trunc_sign_NA_correct :=
+ round_trunc_sign_any_correct _ (fun s m l => cond_incr (round_N true l) m) inbetween_int_NA_sign.
+
+End Fcalc_round_fexp.
+
+(** Specialization of truncate for FIX formats. *)
+
+Variable emin : Z.
+
+Definition truncate_FIX t :=
+ let '(m, e, l) := t in
+ let k := (emin - e)%Z in
+ if Zlt_bool 0 k then
+ let p := Zpower beta k in
+ (Zdiv m p, (e + k)%Z, new_location p (Zmod m p) l)
+ else t.
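+
+(* E.g. with emin = 0 in radix 10 (a sketch, not used by the proof below),
+   truncate_FIX (4269, -3, loc_Exact) shifts by k = 3 and yields
+   (4, 0, loc_Inexact Lt): 4.269 lies between 4 and 5, below the midpoint. *)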
+
+Theorem truncate_FIX_correct :
+ forall x m e l,
+ inbetween_float beta m e x l ->
+ (e <= emin)%Z \/ l = loc_Exact ->
+ let '(m', e', l') := truncate_FIX (m, e, l) in
+ inbetween_float beta m' e' x l' /\
+ (e' = canonic_exp beta (FIX_exp emin) x \/ (l' = loc_Exact /\ generic_format beta (FIX_exp emin) x)).
+Proof.
+intros x m e l H1 H2.
+unfold truncate_FIX.
+set (k := (emin - e)%Z).
+set (p := Zpower beta k).
+unfold canonic_exp, FIX_exp.
+generalize (Zlt_cases 0 k).
+case (Zlt_bool 0 k) ; intros Hk.
+(* shift *)
+split.
+now apply inbetween_float_new_location.
+clear H2.
+left.
+unfold k.
+ring.
+(* no shift *)
+split.
+exact H1.
+unfold k in Hk.
+destruct H2 as [H2|H2].
+left.
+omega.
+right.
+split.
+exact H2.
+rewrite H2 in H1.
+inversion_clear H1.
+rewrite H.
+apply generic_format_F2R.
+unfold canonic_exp.
+omega.
+Qed.
+
+End Fcalc_round.
diff --git a/flocq/Calc/Fcalc_sqrt.v b/flocq/Calc/Fcalc_sqrt.v
new file mode 100644
index 0000000..e6fe74b
--- /dev/null
+++ b/flocq/Calc/Fcalc_sqrt.v
@@ -0,0 +1,346 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Helper functions and theorems for computing the rounded square root of a floating-point number. *)
+
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_digits.
+Require Import Fcore_float_prop.
+Require Import Fcalc_bracket.
+Require Import Fcalc_digits.
+
+Section Fcalc_sqrt.
+
+Fixpoint Zsqrt_aux (p : positive) : Z * Z :=
+ match p with
+ | xH => (1, 0)%Z
+ | xO xH => (1, 1)%Z
+ | xI xH => (1, 2)%Z
+ | xO (xO p) =>
+ let (q, r) := Zsqrt_aux p in
+ let r' := (4 * r)%Z in
+ let d := (r' - (4 * q + 1))%Z in
+ if Zlt_bool d 0 then (2 * q, r')%Z else (2 * q + 1, d)%Z
+ | xO (xI p) =>
+ let (q, r) := Zsqrt_aux p in
+ let r' := (4 * r + 2)%Z in
+ let d := (r' - (4 * q + 1))%Z in
+ if Zlt_bool d 0 then (2 * q, r')%Z else (2 * q + 1, d)%Z
+ | xI (xO p) =>
+ let (q, r) := Zsqrt_aux p in
+ let r' := (4 * r + 1)%Z in
+ let d := (r' - (4 * q + 1))%Z in
+ if Zlt_bool d 0 then (2 * q, r')%Z else (2 * q + 1, d)%Z
+ | xI (xI p) =>
+ let (q, r) := Zsqrt_aux p in
+ let r' := (4 * r + 3)%Z in
+ let d := (r' - (4 * q + 1))%Z in
+ if Zlt_bool d 0 then (2 * q, r')%Z else (2 * q + 1, d)%Z
+ end.
+
+Lemma Zsqrt_ind :
+ forall P : positive -> Prop,
+ P xH -> P (xO xH) -> P (xI xH) ->
+ ( forall p, P p -> P (xO (xO p)) /\ P (xO (xI p)) /\ P (xI (xO p)) /\ P (xI (xI p)) ) ->
+ forall p, P p.
+Proof.
+intros P H1 H2 H3 Hp.
+fix 1.
+intros [[p|p|]|[p|p|]|].
+refine (proj2 (proj2 (proj2 (Hp p _)))).
+apply Zsqrt_ind.
+refine (proj1 (proj2 (proj2 (Hp p _)))).
+apply Zsqrt_ind.
+exact H3.
+refine (proj1 (proj2 (Hp p _))).
+apply Zsqrt_ind.
+refine (proj1 (Hp p _)).
+apply Zsqrt_ind.
+exact H2.
+exact H1.
+Qed.
+
+Lemma Zsqrt_aux_correct :
+ forall p,
+ let (q, r) := Zsqrt_aux p in
+ Zpos p = (q * q + r)%Z /\ (0 <= r <= 2 * q)%Z.
+Proof.
+intros p.
+elim p using Zsqrt_ind ; clear p.
+now repeat split.
+now repeat split.
+now repeat split.
+intros p.
+Opaque Zmult. simpl. Transparent Zmult.
+destruct (Zsqrt_aux p) as (q, r).
+intros (Hq, Hr).
+change (Zpos p~0~0) with (4 * Zpos p)%Z.
+change (Zpos p~0~1) with (4 * Zpos p + 1)%Z.
+change (Zpos p~1~0) with (4 * Zpos p + 2)%Z.
+change (Zpos p~1~1) with (4 * Zpos p + 3)%Z.
+rewrite Hq. clear Hq.
+repeat split.
+generalize (Zlt_cases (4 * r - (4 * q + 1)) 0).
+case Zlt_bool ; ( split ; [ ring | omega ] ).
+generalize (Zlt_cases (4 * r + 2 - (4 * q + 1)) 0).
+case Zlt_bool ; ( split ; [ ring | omega ] ).
+generalize (Zlt_cases (4 * r + 1 - (4 * q + 1)) 0).
+case Zlt_bool ; ( split ; [ ring | omega ] ).
+generalize (Zlt_cases (4 * r + 3 - (4 * q + 1)) 0).
+case Zlt_bool ; ( split ; [ ring | omega ] ).
+Qed.
+
+(** Computes the integer square root and its remainder, but
+    without carrying a proof, contrary to the operation of
+    the standard library. *)
+
+Definition Zsqrt p :=
+ match p with
+ | Zpos p => Zsqrt_aux p
+ | _ => (0, 0)%Z
+ end.
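+
+(* For example, Zsqrt 10 = (3, 1), since 10 = 3*3 + 1 and 0 <= 1 <= 2*3,
+   as stated by Zsqrt_correct below. *)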
+
+Theorem Zsqrt_correct :
+ forall x,
+ (0 <= x)%Z ->
+ let (q, r) := Zsqrt x in
+ x = (q * q + r)%Z /\ (0 <= r <= 2 * q)%Z.
+Proof.
+unfold Zsqrt.
+intros [|p|p] Hx.
+now repeat split.
+apply Zsqrt_aux_correct.
+now elim Hx.
+Qed.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+(** Computes a mantissa of precision p, the corresponding exponent,
+ and the position with respect to the real square root of the
+ input floating-point number.
+
+The algorithm performs the following steps:
+- Shift the mantissa so that it has at least 2p-1 digits;
+ shift it one digit more if the new exponent is not even.
+- Compute the square root s (at least p digits) of the new
+ mantissa, and its remainder r.
+- Compute the position according to the remainder:
+  -- r == 0      =>  Eq,
+  -- 0 < r <= s  =>  Lo,
+  -- r > s       =>  Up.
+
+Complexity is fine as long as p1 <= 2p-1, where p1 is the digit count of
+the input mantissa.
+*)
+
+Definition Fsqrt_core prec m e :=
+ let d := Zdigits beta m in
+ let s := Zmax (2 * prec - d) 0 in
+ let e' := (e - s)%Z in
+ let (s', e'') := if Zeven e' then (s, e') else (s + 1, e' - 1)%Z in
+ let m' :=
+ match s' with
+ | Zpos p => (m * Zpower_pos beta p)%Z
+ | _ => m
+ end in
+ let (q, r) := Zsqrt m' in
+ let l :=
+ if Zeq_bool r 0 then loc_Exact
+ else loc_Inexact (if Zle_bool r q then Lt else Gt) in
+ (q, Zdiv2 e'', l).
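+
+(* Worked instance (radix 10, numbers chosen for illustration): with prec = 2,
+   m = 2 and e = 0, i.e. the square root of 2, the mantissa is shifted to
+   m' = 20000 with (even) exponent -4; Zsqrt 20000 = (141, 119), and
+   0 < 119 <= 141 gives loc_Inexact Lt.  The result (141, -2, loc_Inexact Lt)
+   says that sqrt 2 lies between 1.41 and 1.42, below the midpoint 1.415. *)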
+
+Theorem Fsqrt_core_correct :
+ forall prec m e,
+ (0 < m)%Z ->
+ let '(m', e', l) := Fsqrt_core prec m e in
+ (prec <= Zdigits beta m')%Z /\
+ inbetween_float beta m' e' (sqrt (F2R (Float beta m e))) l.
+Proof.
+intros prec m e Hm.
+unfold Fsqrt_core.
+set (d := Zdigits beta m).
+set (s := Zmax (2 * prec - d) 0).
+(* . exponent *)
+case_eq (if Zeven (e - s) then (s, (e - s)%Z) else ((s + 1)%Z, (e - s - 1)%Z)).
+intros s' e' Hse.
+assert (He: (Zeven e' = true /\ 0 <= s' /\ 2 * prec - d <= s' /\ s' + e' = e)%Z).
+revert Hse.
+case_eq (Zeven (e - s)) ; intros He Hse ; inversion Hse.
+repeat split.
+exact He.
+unfold s.
+apply Zle_max_r.
+apply Zle_max_l.
+ring.
+assert (H: (Zmax (2 * prec - d) 0 <= s + 1)%Z).
+fold s.
+apply Zle_succ.
+repeat split.
+unfold Zminus at 1.
+now rewrite Zeven_plus, He.
+apply Zle_trans with (2 := H).
+apply Zle_max_r.
+apply Zle_trans with (2 := H).
+apply Zle_max_l.
+ring.
+clear -Hm He.
+destruct He as (He1, (He2, (He3, He4))).
+(* . shift *)
+set (m' := match s' with
+ | Z0 => m
+ | Zpos p => (m * Zpower_pos beta p)%Z
+ | Zneg _ => m
+ end).
+assert (Hs: F2R (Float beta m' e') = F2R (Float beta m e) /\ (2 * prec <= Zdigits beta m')%Z /\ (0 < m')%Z).
+rewrite <- He4.
+unfold m'.
+destruct s' as [|p|p].
+repeat split ; try easy.
+fold d.
+omega.
+fold (Zpower beta (Zpos p)).
+split.
+replace (Zpos p) with (Zpos p + e' - e')%Z by ring.
+rewrite <- F2R_change_exp.
+apply (f_equal (fun v => F2R (Float beta m v))).
+ring.
+assert (0 < Zpos p)%Z by easy.
+omega.
+split.
+rewrite Zdigits_mult_Zpower.
+fold d.
+omega.
+apply sym_not_eq.
+now apply Zlt_not_eq.
+easy.
+apply Zmult_lt_0_compat.
+exact Hm.
+now apply Zpower_gt_0.
+now elim He2.
+clearbody m'.
+destruct Hs as (Hs1, (Hs2, Hs3)).
+generalize (Zsqrt_correct m' (Zlt_le_weak _ _ Hs3)).
+destruct (Zsqrt m') as (q, r).
+intros (Hq, Hr).
+rewrite <- Hs1. clear Hs1.
+split.
+(* . mantissa width *)
+apply Zmult_le_reg_r with 2%Z.
+easy.
+rewrite Zmult_comm.
+apply Zle_trans with (1 := Hs2).
+rewrite Hq.
+apply Zle_trans with (Zdigits beta (q + q + q * q)).
+apply Zdigits_le.
+rewrite <- Hq.
+now apply Zlt_le_weak.
+omega.
+replace (Zdigits beta q * 2)%Z with (Zdigits beta q + Zdigits beta q)%Z by ring.
+apply Zdigits_mult_strong.
+omega.
+omega.
+(* . round *)
+unfold inbetween_float, F2R. simpl.
+rewrite sqrt_mult.
+2: now apply (Z2R_le 0) ; apply Zlt_le_weak.
+2: apply Rlt_le ; apply bpow_gt_0.
+destruct (Zeven_ex e') as (e2, Hev).
+rewrite He1, Zplus_0_r in Hev. clear He1.
+rewrite Hev.
+replace (Zdiv2 (2 * e2)) with e2 by now case e2.
+replace (2 * e2)%Z with (e2 + e2)%Z by ring.
+rewrite bpow_plus.
+fold (Rsqr (bpow e2)).
+rewrite sqrt_Rsqr.
+2: apply Rlt_le ; apply bpow_gt_0.
+apply inbetween_mult_compat.
+apply bpow_gt_0.
+rewrite Hq.
+case Zeq_bool_spec ; intros Hr'.
+(* .. r = 0 *)
+rewrite Hr', Zplus_0_r, Z2R_mult.
+fold (Rsqr (Z2R q)).
+rewrite sqrt_Rsqr.
+now constructor.
+apply (Z2R_le 0).
+omega.
+(* .. r <> 0 *)
+constructor.
+split.
+(* ... bounds *)
+apply Rle_lt_trans with (sqrt (Z2R (q * q))).
+rewrite Z2R_mult.
+fold (Rsqr (Z2R q)).
+rewrite sqrt_Rsqr.
+apply Rle_refl.
+apply (Z2R_le 0).
+omega.
+apply sqrt_lt_1.
+rewrite Z2R_mult.
+apply Rle_0_sqr.
+rewrite <- Hq.
+apply (Z2R_le 0).
+now apply Zlt_le_weak.
+apply Z2R_lt.
+omega.
+apply Rlt_le_trans with (sqrt (Z2R ((q + 1) * (q + 1)))).
+apply sqrt_lt_1.
+rewrite <- Hq.
+apply (Z2R_le 0).
+now apply Zlt_le_weak.
+rewrite Z2R_mult.
+apply Rle_0_sqr.
+apply Z2R_lt.
+ring_simplify.
+omega.
+rewrite Z2R_mult.
+fold (Rsqr (Z2R (q + 1))).
+rewrite sqrt_Rsqr.
+apply Rle_refl.
+apply (Z2R_le 0).
+omega.
+(* ... location *)
+rewrite Rcompare_half_r.
+rewrite <- Rcompare_sqr.
+replace ((2 * sqrt (Z2R (q * q + r))) * (2 * sqrt (Z2R (q * q + r))))%R
+ with (4 * Rsqr (sqrt (Z2R (q * q + r))))%R by (unfold Rsqr ; ring).
+rewrite Rsqr_sqrt.
+change 4%R with (Z2R 4).
+rewrite <- Z2R_plus, <- 2!Z2R_mult.
+rewrite Rcompare_Z2R.
+replace ((q + (q + 1)) * (q + (q + 1)))%Z with (4 * (q * q) + 4 * q + 1)%Z by ring.
+generalize (Zle_cases r q).
+case (Zle_bool r q) ; intros Hr''.
+change (4 * (q * q + r) < 4 * (q * q) + 4 * q + 1)%Z.
+omega.
+change (4 * (q * q + r) > 4 * (q * q) + 4 * q + 1)%Z.
+omega.
+rewrite <- Hq.
+apply (Z2R_le 0).
+now apply Zlt_le_weak.
+apply Rmult_le_pos.
+now apply (Z2R_le 0 2).
+apply sqrt_ge_0.
+rewrite <- Z2R_plus.
+apply (Z2R_le 0).
+omega.
+Qed.
+
+End Fcalc_sqrt.
diff --git a/flocq/Core/Fcore.v b/flocq/Core/Fcore.v
new file mode 100644
index 0000000..23ebb39
--- /dev/null
+++ b/flocq/Core/Fcore.v
@@ -0,0 +1,30 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** Convenience module that re-exports the whole core library, to ease importing it *)
+Require Export Fcore_Raux.
+Require Export Fcore_defs.
+Require Export Fcore_float_prop.
+Require Export Fcore_rnd.
+Require Export Fcore_generic_fmt.
+Require Export Fcore_rnd_ne.
+Require Export Fcore_FIX.
+Require Export Fcore_FLX.
+Require Export Fcore_FLT.
+Require Export Fcore_ulp.
diff --git a/flocq/Core/Fcore_FIX.v b/flocq/Core/Fcore_FIX.v
new file mode 100644
index 0000000..f185ddf
--- /dev/null
+++ b/flocq/Core/Fcore_FIX.v
@@ -0,0 +1,87 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Fixed-point format *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_generic_fmt.
+Require Import Fcore_rnd_ne.
+
+Section RND_FIX.
+
+Variable beta : radix.
+
+Notation bpow := (bpow beta).
+
+Variable emin : Z.
+
+(* fixed-point format with exponent emin *)
+Definition FIX_format (x : R) :=
+ exists f : float beta,
+ x = F2R f /\ (Fexp f = emin)%Z.
+
+Definition FIX_exp (e : Z) := emin.
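+
+(* Illustration (with assumed values beta = 10 and emin = -2, chosen only
+   as an example): FIX_format then contains exactly the multiples of 0.01,
+   and FIX_exp ignores its argument, e.g. FIX_exp 7 = FIX_exp (-5) = -2. *)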
+
+(** Properties of the FIX format *)
+
+Global Instance FIX_exp_valid : Valid_exp FIX_exp.
+Proof.
+intros k.
+unfold FIX_exp.
+split ; intros H.
+now apply Zlt_le_weak.
+split.
+apply Zle_refl.
+now intros _ _.
+Qed.
+
+Theorem generic_format_FIX :
+ forall x, FIX_format x -> generic_format beta FIX_exp x.
+Proof.
+intros x ((xm, xe), (Hx1, Hx2)).
+rewrite Hx1.
+now apply generic_format_canonic.
+Qed.
+
+Theorem FIX_format_generic :
+ forall x, generic_format beta FIX_exp x -> FIX_format x.
+Proof.
+intros x H.
+rewrite H.
+eexists ; repeat split.
+Qed.
+
+Theorem FIX_format_satisfies_any :
+ satisfies_any FIX_format.
+Proof.
+refine (satisfies_any_eq _ _ _ (generic_format_satisfies_any beta FIX_exp)).
+intros x.
+split.
+apply FIX_format_generic.
+apply generic_format_FIX.
+Qed.
+
+Global Instance FIX_exp_monotone : Monotone_exp FIX_exp.
+Proof.
+intros ex ey H.
+apply Zle_refl.
+Qed.
+
+End RND_FIX.
diff --git a/flocq/Core/Fcore_FLT.v b/flocq/Core/Fcore_FLT.v
new file mode 100644
index 0000000..4ad4797
--- /dev/null
+++ b/flocq/Core/Fcore_FLT.v
@@ -0,0 +1,250 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Floating-point format with gradual underflow *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_generic_fmt.
+Require Import Fcore_float_prop.
+Require Import Fcore_FLX.
+Require Import Fcore_FIX.
+Require Import Fcore_rnd_ne.
+
+Section RND_FLT.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Variable emin prec : Z.
+
+Context { prec_gt_0_ : Prec_gt_0 prec }.
+
+(* floating-point format with gradual underflow *)
+Definition FLT_format (x : R) :=
+ exists f : float beta,
+ x = F2R f /\ (Zabs (Fnum f) < Zpower beta prec)%Z /\ (emin <= Fexp f)%Z.
+
+Definition FLT_exp e := Zmax (e - prec) emin.
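+
+(* Illustration (with binary64-like values beta = 2, prec = 53 and
+   emin = -1074, taken only as an example): FLT_exp 0 = Zmax (-53) (-1074)
+   = -53, while FLT_exp (-1060) = Zmax (-1113) (-1074) = -1074: below
+   emin + prec the exponent is clamped to emin, which models gradual
+   underflow with a fixed absolute precision. *)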
+
+(** Properties of the FLT format *)
+Global Instance FLT_exp_valid : Valid_exp FLT_exp.
+Proof.
+intros k.
+unfold FLT_exp.
+generalize (prec_gt_0 prec).
+repeat split ;
+ intros ; zify ; omega.
+Qed.
+
+Theorem generic_format_FLT :
+ forall x, FLT_format x -> generic_format beta FLT_exp x.
+Proof.
+clear prec_gt_0_.
+intros x ((mx, ex), (H1, (H2, H3))).
+simpl in H2, H3.
+rewrite H1.
+apply generic_format_F2R.
+intros Zmx.
+unfold canonic_exp, FLT_exp.
+rewrite ln_beta_F2R with (1 := Zmx).
+apply Zmax_lub with (2 := H3).
+apply Zplus_le_reg_r with (prec - ex)%Z.
+ring_simplify.
+now apply ln_beta_le_Zpower.
+Qed.
+
+Theorem FLT_format_generic :
+ forall x, generic_format beta FLT_exp x -> FLT_format x.
+Proof.
+intros x.
+unfold generic_format.
+set (ex := canonic_exp beta FLT_exp x).
+set (mx := Ztrunc (scaled_mantissa beta FLT_exp x)).
+intros Hx.
+rewrite Hx.
+eexists ; repeat split ; simpl.
+apply lt_Z2R.
+rewrite Z2R_Zpower. 2: now apply Zlt_le_weak.
+apply Rmult_lt_reg_r with (bpow ex).
+apply bpow_gt_0.
+rewrite <- bpow_plus.
+change (F2R (Float beta (Zabs mx) ex) < bpow (prec + ex))%R.
+rewrite F2R_Zabs.
+rewrite <- Hx.
+destruct (Req_dec x 0) as [Hx0|Hx0].
+rewrite Hx0, Rabs_R0.
+apply bpow_gt_0.
+unfold canonic_exp in ex.
+destruct (ln_beta beta x) as (ex', He).
+simpl in ex.
+specialize (He Hx0).
+apply Rlt_le_trans with (1 := proj2 He).
+apply bpow_le.
+cut (ex' - prec <= ex)%Z. omega.
+unfold ex, FLT_exp.
+apply Zle_max_l.
+apply Zle_max_r.
+Qed.
+
+Theorem FLT_format_satisfies_any :
+ satisfies_any FLT_format.
+Proof.
+refine (satisfies_any_eq _ _ _ (generic_format_satisfies_any beta FLT_exp)).
+intros x.
+split.
+apply FLT_format_generic.
+apply generic_format_FLT.
+Qed.
+
+Theorem canonic_exp_FLT_FLX :
+ forall x, x <> R0 ->
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ canonic_exp beta FLT_exp x = canonic_exp beta (FLX_exp prec) x.
+Proof.
+intros x Hx0 Hx.
+unfold canonic_exp.
+apply Zmax_left.
+destruct (ln_beta beta x) as (ex, He).
+unfold FLX_exp. simpl.
+specialize (He Hx0).
+cut (emin + prec - 1 < ex)%Z. omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (1 := Hx).
+apply He.
+Qed.
+
+(** Links between FLT and FLX *)
+Theorem generic_format_FLT_FLX :
+ forall x : R,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ generic_format beta (FLX_exp prec) x ->
+ generic_format beta FLT_exp x.
+Proof.
+intros x Hx H.
+destruct (Req_dec x 0) as [Hx0|Hx0].
+rewrite Hx0.
+apply generic_format_0.
+unfold generic_format, scaled_mantissa.
+now rewrite canonic_exp_FLT_FLX.
+Qed.
+
+Theorem generic_format_FLX_FLT :
+ forall x : R,
+ generic_format beta FLT_exp x -> generic_format beta (FLX_exp prec) x.
+Proof.
+clear prec_gt_0_.
+intros x Hx.
+unfold generic_format in Hx; rewrite Hx.
+apply generic_format_F2R.
+intros _.
+rewrite <- Hx.
+unfold canonic_exp, FLX_exp, FLT_exp.
+apply Zle_max_l.
+Qed.
+
+Theorem round_FLT_FLX : forall rnd x,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ round beta FLT_exp rnd x = round beta (FLX_exp prec) rnd x.
+Proof.
+intros rnd x Hx.
+unfold round, scaled_mantissa.
+rewrite canonic_exp_FLT_FLX ; trivial.
+contradict Hx.
+rewrite Hx, Rabs_R0.
+apply Rlt_not_le.
+apply bpow_gt_0.
+Qed.
+
+(** Links between FLT and FIX (underflow) *)
+Theorem canonic_exp_FLT_FIX :
+ forall x, x <> R0 ->
+ (Rabs x < bpow (emin + prec))%R ->
+ canonic_exp beta FLT_exp x = canonic_exp beta (FIX_exp emin) x.
+Proof.
+intros x Hx0 Hx.
+unfold canonic_exp.
+apply Zmax_right.
+unfold FIX_exp.
+destruct (ln_beta beta x) as (ex, Hex).
+simpl.
+cut (ex - 1 < emin + prec)%Z. omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2 := Hx).
+now apply Hex.
+Qed.
+
+Theorem generic_format_FIX_FLT :
+ forall x : R,
+ generic_format beta FLT_exp x ->
+ generic_format beta (FIX_exp emin) x.
+Proof.
+clear prec_gt_0_.
+intros x Hx.
+rewrite Hx.
+apply generic_format_F2R.
+intros _.
+rewrite <- Hx.
+apply Zle_max_r.
+Qed.
+
+Theorem generic_format_FLT_FIX :
+ forall x : R,
+ (Rabs x <= bpow (emin + prec))%R ->
+ generic_format beta (FIX_exp emin) x ->
+ generic_format beta FLT_exp x.
+Proof with auto with typeclass_instances.
+clear prec_gt_0_.
+apply generic_inclusion_le...
+intros e He.
+unfold FIX_exp.
+apply Zmax_lub.
+omega.
+apply Zle_refl.
+Qed.
+
+(** FLT is a nice format: it has a monotone exponent... *)
+Global Instance FLT_exp_monotone : Monotone_exp FLT_exp.
+Proof.
+intros ex ey.
+unfold FLT_exp.
+zify ; omega.
+Qed.
+
+(** and it allows a rounding to nearest, ties to even. *)
+Hypothesis NE_prop : Zeven beta = false \/ (1 < prec)%Z.
+
+Global Instance exists_NE_FLT : Exists_NE beta FLT_exp.
+Proof.
+destruct NE_prop as [H|H].
+now left.
+right.
+intros e.
+unfold FLT_exp.
+destruct (Zmax_spec (e - prec) emin) as [(H1,H2)|(H1,H2)] ;
+ rewrite H2 ; clear H2.
+generalize (Zmax_spec (e + 1 - prec) emin).
+generalize (Zmax_spec (e - prec + 1 - prec) emin).
+omega.
+generalize (Zmax_spec (e + 1 - prec) emin).
+generalize (Zmax_spec (emin + 1 - prec) emin).
+omega.
+Qed.
+
+End RND_FLT.
diff --git a/flocq/Core/Fcore_FLX.v b/flocq/Core/Fcore_FLX.v
new file mode 100644
index 0000000..62ecb6f
--- /dev/null
+++ b/flocq/Core/Fcore_FLX.v
@@ -0,0 +1,233 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Floating-point format without underflow *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_generic_fmt.
+Require Import Fcore_float_prop.
+Require Import Fcore_FIX.
+Require Import Fcore_rnd_ne.
+
+Section RND_FLX.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Variable prec : Z.
+
+Class Prec_gt_0 :=
+ prec_gt_0 : (0 < prec)%Z.
+
+Context { prec_gt_0_ : Prec_gt_0 }.
+
+(* unbounded floating-point format *)
+Definition FLX_format (x : R) :=
+ exists f : float beta,
+ x = F2R f /\ (Zabs (Fnum f) < Zpower beta prec)%Z.
+
+Definition FLX_exp (e : Z) := (e - prec)%Z.
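+
+(* Illustration (with assumed values beta = 2 and prec = 53): the real 1
+   has ln_beta 1 = 1, so its canonical exponent is FLX_exp 1 = -52 and its
+   canonical representation is Float 2 (2^52) (-52), i.e. a 53-bit
+   mantissa, whatever the magnitude of the number. *)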
+
+(** Properties of the FLX format *)
+
+Global Instance FLX_exp_valid : Valid_exp FLX_exp.
+Proof.
+intros k.
+unfold FLX_exp.
+generalize prec_gt_0.
+repeat split ; intros ; omega.
+Qed.
+
+Theorem FIX_format_FLX :
+ forall x e,
+ (bpow (e - 1) <= Rabs x <= bpow e)%R ->
+ FLX_format x ->
+ FIX_format beta (e - prec) x.
+Proof.
+clear prec_gt_0_.
+intros x e Hx ((xm, xe), (H1, H2)).
+rewrite H1, (F2R_prec_normalize beta xm xe e prec).
+now eexists.
+exact H2.
+now rewrite <- H1.
+Qed.
+
+Theorem FLX_format_generic :
+ forall x, generic_format beta FLX_exp x -> FLX_format x.
+Proof.
+intros x H.
+rewrite H.
+unfold FLX_format.
+eexists ; repeat split.
+simpl.
+apply lt_Z2R.
+rewrite Z2R_abs.
+rewrite <- scaled_mantissa_generic with (1 := H).
+rewrite <- scaled_mantissa_abs.
+apply Rmult_lt_reg_r with (bpow (canonic_exp beta FLX_exp (Rabs x))).
+apply bpow_gt_0.
+rewrite scaled_mantissa_mult_bpow.
+rewrite Z2R_Zpower, <- bpow_plus.
+2: now apply Zlt_le_weak.
+unfold canonic_exp, FLX_exp.
+ring_simplify (prec + (ln_beta beta (Rabs x) - prec))%Z.
+rewrite ln_beta_abs.
+destruct (Req_dec x 0) as [Hx|Hx].
+rewrite Hx, Rabs_R0.
+apply bpow_gt_0.
+destruct (ln_beta beta x) as (ex, Ex).
+now apply Ex.
+Qed.
+
+Theorem generic_format_FLX :
+ forall x, FLX_format x -> generic_format beta FLX_exp x.
+Proof.
+clear prec_gt_0_.
+intros x ((mx,ex),(H1,H2)).
+simpl in H2.
+rewrite H1.
+apply generic_format_F2R.
+intros Zmx.
+unfold canonic_exp, FLX_exp.
+rewrite ln_beta_F2R with (1 := Zmx).
+apply Zplus_le_reg_r with (prec - ex)%Z.
+ring_simplify.
+now apply ln_beta_le_Zpower.
+Qed.
+
+Theorem FLX_format_satisfies_any :
+ satisfies_any FLX_format.
+Proof.
+refine (satisfies_any_eq _ _ _ (generic_format_satisfies_any beta FLX_exp)).
+intros x.
+split.
+apply FLX_format_generic.
+apply generic_format_FLX.
+Qed.
+
+Theorem FLX_format_FIX :
+ forall x e,
+ (bpow (e - 1) <= Rabs x <= bpow e)%R ->
+ FIX_format beta (e - prec) x ->
+ FLX_format x.
+Proof with auto with typeclass_instances.
+intros x e Hx Fx.
+apply FLX_format_generic.
+apply generic_format_FIX in Fx.
+revert Fx.
+apply generic_inclusion with (e := e)...
+apply Zle_refl.
+Qed.
+
+(** unbounded floating-point format with normal mantissas *)
+Definition FLXN_format (x : R) :=
+ exists f : float beta,
+ x = F2R f /\ (x <> R0 ->
+ Zpower beta (prec - 1) <= Zabs (Fnum f) < Zpower beta prec)%Z.
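+
+(* Illustration (radix 2 and prec = 3, chosen only as an example): the
+   real 3/2 admits the FLX witness Float 2 3 (-1), but its FLXN witness is
+   the normalized Float 2 6 (-2), since 2^(3-1) = 4 <= 6 < 8 = 2^3. *)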
+
+Theorem generic_format_FLXN :
+ forall x, FLXN_format x -> generic_format beta FLX_exp x.
+Proof.
+intros x ((xm,ex),(H1,H2)).
+destruct (Req_dec x 0) as [Zx|Zx].
+rewrite Zx.
+apply generic_format_0.
+specialize (H2 Zx).
+apply generic_format_FLX.
+rewrite H1.
+eexists ; repeat split.
+apply H2.
+Qed.
+
+Theorem FLXN_format_generic :
+ forall x, generic_format beta FLX_exp x -> FLXN_format x.
+Proof.
+intros x Hx.
+rewrite Hx.
+simpl.
+eexists ; split. split.
+simpl.
+rewrite <- Hx.
+intros Zx.
+split.
+(* *)
+apply le_Z2R.
+rewrite Z2R_Zpower.
+2: now apply Zlt_0_le_0_pred.
+rewrite Z2R_abs, <- scaled_mantissa_generic with (1 := Hx).
+apply Rmult_le_reg_r with (bpow (canonic_exp beta FLX_exp x)).
+apply bpow_gt_0.
+rewrite <- bpow_plus.
+rewrite <- scaled_mantissa_abs.
+rewrite <- canonic_exp_abs.
+rewrite scaled_mantissa_mult_bpow.
+unfold canonic_exp, FLX_exp.
+rewrite ln_beta_abs.
+ring_simplify (prec - 1 + (ln_beta beta x - prec))%Z.
+destruct (ln_beta beta x) as (ex,Ex).
+now apply Ex.
+(* *)
+apply lt_Z2R.
+rewrite Z2R_Zpower.
+2: now apply Zlt_le_weak.
+rewrite Z2R_abs, <- scaled_mantissa_generic with (1 := Hx).
+apply Rmult_lt_reg_r with (bpow (canonic_exp beta FLX_exp x)).
+apply bpow_gt_0.
+rewrite <- bpow_plus.
+rewrite <- scaled_mantissa_abs.
+rewrite <- canonic_exp_abs.
+rewrite scaled_mantissa_mult_bpow.
+unfold canonic_exp, FLX_exp.
+rewrite ln_beta_abs.
+ring_simplify (prec + (ln_beta beta x - prec))%Z.
+destruct (ln_beta beta x) as (ex,Ex).
+now apply Ex.
+Qed.
+
+Theorem FLXN_format_satisfies_any :
+ satisfies_any FLXN_format.
+Proof.
+refine (satisfies_any_eq _ _ _ (generic_format_satisfies_any beta FLX_exp)).
+split ; intros H.
+now apply FLXN_format_generic.
+now apply generic_format_FLXN.
+Qed.
+
+(** FLX is a nice format: it has a monotone exponent... *)
+Global Instance FLX_exp_monotone : Monotone_exp FLX_exp.
+Proof.
+intros ex ey Hxy.
+now apply Zplus_le_compat_r.
+Qed.
+
+(** and it allows a rounding to nearest, ties to even. *)
+Hypothesis NE_prop : Zeven beta = false \/ (1 < prec)%Z.
+
+Global Instance exists_NE_FLX : Exists_NE beta FLX_exp.
+Proof.
+destruct NE_prop as [H|H].
+now left.
+right.
+unfold FLX_exp.
+split ; omega.
+Qed.
+
+End RND_FLX.
diff --git a/flocq/Core/Fcore_FTZ.v b/flocq/Core/Fcore_FTZ.v
new file mode 100644
index 0000000..5356c11
--- /dev/null
+++ b/flocq/Core/Fcore_FTZ.v
@@ -0,0 +1,330 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Floating-point format with abrupt underflow *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_generic_fmt.
+Require Import Fcore_float_prop.
+Require Import Fcore_FLX.
+
+Section RND_FTZ.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Variable emin prec : Z.
+
+Context { prec_gt_0_ : Prec_gt_0 prec }.
+
+(* floating-point format with abrupt underflow *)
+Definition FTZ_format (x : R) :=
+ exists f : float beta,
+ x = F2R f /\ (x <> R0 -> Zpower beta (prec - 1) <= Zabs (Fnum f) < Zpower beta prec)%Z /\
+ (emin <= Fexp f)%Z.
+
+Definition FTZ_exp e := if Zlt_bool (e - prec) emin then (emin + prec - 1)%Z else (e - prec)%Z.
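+
+(* Illustration (binary64-like values beta = 2, prec = 53, emin = -1074,
+   taken only as an example): for e >= emin + prec = -1021, FTZ_exp
+   behaves like FLX_exp, e.g. FTZ_exp (-1021) = -1074; one step lower,
+   FTZ_exp (-1022) = emin + prec - 1 = -1022, so no nonzero real of that
+   magnitude is in the format: subnormal values are flushed to zero. *)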
+
+(** Properties of the FTZ format *)
+Global Instance FTZ_exp_valid : Valid_exp FTZ_exp.
+Proof.
+intros k.
+unfold FTZ_exp.
+generalize (Zlt_cases (k - prec) emin).
+case (Zlt_bool (k - prec) emin) ; intros H1.
+split ; intros H2.
+omega.
+split.
+generalize (Zlt_cases (emin + prec + 1 - prec) emin).
+case (Zlt_bool (emin + prec + 1 - prec) emin) ; intros H3.
+omega.
+generalize (Zlt_cases (emin + prec - 1 + 1 - prec) emin).
+generalize (prec_gt_0 prec).
+case (Zlt_bool (emin + prec - 1 + 1 - prec) emin) ; omega.
+intros l H3.
+generalize (Zlt_cases (l - prec) emin).
+case (Zlt_bool (l - prec) emin) ; omega.
+split ; intros H2.
+generalize (Zlt_cases (k + 1 - prec) emin).
+case (Zlt_bool (k + 1 - prec) emin) ; omega.
+generalize (prec_gt_0 prec).
+split ; intros ; omega.
+Qed.
+
+Theorem FLXN_format_FTZ :
+ forall x, FTZ_format x -> FLXN_format beta prec x.
+Proof.
+intros x ((xm, xe), (Hx1, (Hx2, Hx3))).
+eexists.
+apply (conj Hx1 Hx2).
+Qed.
+
+Theorem generic_format_FTZ :
+ forall x, FTZ_format x -> generic_format beta FTZ_exp x.
+Proof.
+intros x Hx.
+cut (generic_format beta (FLX_exp prec) x).
+apply generic_inclusion_ln_beta.
+intros Zx.
+destruct Hx as ((xm, xe), (Hx1, (Hx2, Hx3))).
+simpl in Hx2, Hx3.
+specialize (Hx2 Zx).
+assert (Zxm: xm <> Z0).
+contradict Zx.
+rewrite Hx1, Zx.
+apply F2R_0.
+unfold FTZ_exp, FLX_exp.
+rewrite Zlt_bool_false.
+apply Zle_refl.
+rewrite Hx1, ln_beta_F2R with (1 := Zxm).
+cut (prec - 1 < ln_beta beta (Z2R xm))%Z.
+clear -Hx3 ; omega.
+apply ln_beta_gt_Zpower with (1 := Zxm).
+apply Hx2.
+apply generic_format_FLXN.
+now apply FLXN_format_FTZ.
+Qed.
+
+Theorem FTZ_format_generic :
+ forall x, generic_format beta FTZ_exp x -> FTZ_format x.
+Proof.
+intros x Hx.
+destruct (Req_dec x 0) as [Hx3|Hx3].
+exists (Float beta 0 emin).
+split.
+unfold F2R. simpl.
+now rewrite Rmult_0_l.
+split.
+intros H.
+now elim H.
+apply Zle_refl.
+unfold generic_format, scaled_mantissa, canonic_exp, FTZ_exp in Hx.
+destruct (ln_beta beta x) as (ex, Hx4).
+simpl in Hx.
+specialize (Hx4 Hx3).
+generalize (Zlt_cases (ex - prec) emin) Hx. clear Hx.
+case (Zlt_bool (ex - prec) emin) ; intros Hx5 Hx2.
+elim Rlt_not_ge with (1 := proj2 Hx4).
+apply Rle_ge.
+rewrite Hx2, <- F2R_Zabs.
+rewrite <- (Rmult_1_l (bpow ex)).
+unfold F2R. simpl.
+apply Rmult_le_compat.
+now apply (Z2R_le 0 1).
+apply bpow_ge_0.
+apply (Z2R_le 1).
+apply (Zlt_le_succ 0).
+apply lt_Z2R.
+apply Rmult_lt_reg_r with (bpow (emin + prec - 1)).
+apply bpow_gt_0.
+rewrite Rmult_0_l.
+change (0 < F2R (Float beta (Zabs (Ztrunc (x * bpow (- (emin + prec - 1))))) (emin + prec - 1)))%R.
+rewrite F2R_Zabs, <- Hx2.
+now apply Rabs_pos_lt.
+apply bpow_le.
+omega.
+rewrite Hx2.
+eexists ; repeat split ; simpl.
+apply le_Z2R.
+rewrite Z2R_Zpower.
+apply Rmult_le_reg_r with (bpow (ex - prec)).
+apply bpow_gt_0.
+rewrite <- bpow_plus.
+replace (prec - 1 + (ex - prec))%Z with (ex - 1)%Z by ring.
+change (bpow (ex - 1) <= F2R (Float beta (Zabs (Ztrunc (x * bpow (- (ex - prec))))) (ex - prec)))%R.
+rewrite F2R_Zabs, <- Hx2.
+apply Hx4.
+apply Zle_minus_le_0.
+now apply (Zlt_le_succ 0).
+apply lt_Z2R.
+rewrite Z2R_Zpower.
+apply Rmult_lt_reg_r with (bpow (ex - prec)).
+apply bpow_gt_0.
+rewrite <- bpow_plus.
+replace (prec + (ex - prec))%Z with ex by ring.
+change (F2R (Float beta (Zabs (Ztrunc (x * bpow (- (ex - prec))))) (ex - prec)) < bpow ex)%R.
+rewrite F2R_Zabs, <- Hx2.
+apply Hx4.
+now apply Zlt_le_weak.
+now apply Zge_le.
+Qed.
+
+Theorem FTZ_format_satisfies_any :
+ satisfies_any FTZ_format.
+Proof.
+refine (satisfies_any_eq _ _ _ (generic_format_satisfies_any beta FTZ_exp)).
+intros x.
+split.
+apply FTZ_format_generic.
+apply generic_format_FTZ.
+Qed.
+
+Theorem FTZ_format_FLXN :
+ forall x : R,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ FLXN_format beta prec x -> FTZ_format x.
+Proof.
+clear prec_gt_0_.
+intros x Hx Fx.
+apply FTZ_format_generic.
+apply generic_format_FLXN in Fx.
+revert Hx Fx.
+apply generic_inclusion_ge.
+intros e He.
+unfold FTZ_exp.
+rewrite Zlt_bool_false.
+apply Zle_refl.
+omega.
+Qed.
+
+Section FTZ_round.
+
+(** Rounding with FTZ *)
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Definition Zrnd_FTZ x :=
+ if Rle_bool R1 (Rabs x) then rnd x else Z0.
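+
+(* Illustration: Zrnd_FTZ leaves rnd unchanged on inputs of absolute value
+   at least 1, e.g. Zrnd_FTZ (5/2) = rnd (5/2), and returns 0 otherwise;
+   applied to the scaled mantissa in round, this is what flushes values
+   below the first normal number to zero (cf. round_FTZ_small below). *)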
+
+Global Instance valid_rnd_FTZ : Valid_rnd Zrnd_FTZ.
+Proof with auto with typeclass_instances.
+split.
+(* *)
+intros x y Hxy.
+unfold Zrnd_FTZ.
+case Rle_bool_spec ; intros Hx ;
+ case Rle_bool_spec ; intros Hy.
+4: easy.
+(* 1 <= |x| *)
+now apply Zrnd_le.
+rewrite <- (Zrnd_Z2R rnd 0).
+apply Zrnd_le...
+apply Rle_trans with (Z2R (-1)). 2: now apply Z2R_le.
+destruct (Rabs_ge_inv _ _ Hx) as [Hx1|Hx1].
+exact Hx1.
+elim Rle_not_lt with (1 := Hx1).
+apply Rle_lt_trans with (2 := Hy).
+apply Rle_trans with (1 := Hxy).
+apply RRle_abs.
+(* |x| < 1 *)
+rewrite <- (Zrnd_Z2R rnd 0).
+apply Zrnd_le...
+apply Rle_trans with (Z2R 1).
+now apply Z2R_le.
+destruct (Rabs_ge_inv _ _ Hy) as [Hy1|Hy1].
+elim Rle_not_lt with (1 := Hy1).
+apply Rlt_le_trans with (2 := Hxy).
+apply (Rabs_def2 _ _ Hx).
+exact Hy1.
+(* *)
+intros n.
+unfold Zrnd_FTZ.
+rewrite Zrnd_Z2R...
+case Rle_bool_spec.
+easy.
+rewrite <- Z2R_abs.
+intros H.
+generalize (lt_Z2R _ 1 H).
+clear.
+now case n ; trivial ; simpl ; intros [p|p|].
+Qed.
+
+Theorem round_FTZ_FLX :
+ forall x : R,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ round beta FTZ_exp Zrnd_FTZ x = round beta (FLX_exp prec) rnd x.
+Proof.
+intros x Hx.
+unfold round, scaled_mantissa, canonic_exp.
+destruct (ln_beta beta x) as (ex, He). simpl.
+assert (Hx0: x <> R0).
+intros Hx0.
+apply Rle_not_lt with (1 := Hx).
+rewrite Hx0, Rabs_R0.
+apply bpow_gt_0.
+specialize (He Hx0).
+assert (He': (emin + prec <= ex)%Z).
+apply (bpow_lt_bpow beta).
+apply Rle_lt_trans with (1 := Hx).
+apply He.
+replace (FTZ_exp ex) with (FLX_exp prec ex).
+unfold Zrnd_FTZ.
+rewrite Rle_bool_true.
+apply refl_equal.
+rewrite Rabs_mult.
+rewrite (Rabs_pos_eq (bpow (- FLX_exp prec ex))).
+change R1 with (bpow 0).
+rewrite <- (Zplus_opp_r (FLX_exp prec ex)).
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Rle_trans with (2 := proj1 He).
+apply bpow_le.
+unfold FLX_exp.
+generalize (prec_gt_0 prec).
+clear -He' ; omega.
+apply bpow_ge_0.
+unfold FLX_exp, FTZ_exp.
+rewrite Zlt_bool_false.
+apply refl_equal.
+clear -He' ; omega.
+Qed.
+
+Theorem round_FTZ_small :
+ forall x : R,
+ (Rabs x < bpow (emin + prec - 1))%R ->
+ round beta FTZ_exp Zrnd_FTZ x = R0.
+Proof with auto with typeclass_instances.
+intros x Hx.
+destruct (Req_dec x 0) as [Hx0|Hx0].
+rewrite Hx0.
+apply round_0...
+unfold round, scaled_mantissa, canonic_exp.
+destruct (ln_beta beta x) as (ex, He). simpl.
+specialize (He Hx0).
+unfold Zrnd_FTZ.
+rewrite Rle_bool_false.
+apply F2R_0.
+rewrite Rabs_mult.
+rewrite (Rabs_pos_eq (bpow (- FTZ_exp ex))).
+change R1 with (bpow 0).
+rewrite <- (Zplus_opp_r (FTZ_exp ex)).
+rewrite bpow_plus.
+apply Rmult_lt_compat_r.
+apply bpow_gt_0.
+apply Rlt_le_trans with (1 := Hx).
+apply bpow_le.
+unfold FTZ_exp.
+generalize (Zlt_cases (ex - prec) emin).
+case Zlt_bool.
+intros _.
+apply Zle_refl.
+intros He'.
+elim Rlt_not_le with (1 := Hx).
+apply Rle_trans with (2 := proj1 He).
+apply bpow_le.
+omega.
+apply bpow_ge_0.
+Qed.
+
+End FTZ_round.
+
+End RND_FTZ.
diff --git a/flocq/Core/Fcore_Raux.v b/flocq/Core/Fcore_Raux.v
new file mode 100644
index 0000000..748e36e
--- /dev/null
+++ b/flocq/Core/Fcore_Raux.v
@@ -0,0 +1,1996 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Missing definitions/lemmas *)
+Require Export Reals.
+Require Export ZArith.
+Require Export Fcore_Zaux.
+
+Section Rmissing.
+
+(** About R *)
+Theorem Rle_0_minus :
+ forall x y, (x <= y)%R -> (0 <= y - x)%R.
+Proof.
+intros.
+apply Rge_le.
+apply Rge_minus.
+now apply Rle_ge.
+Qed.
+
+Theorem Rabs_eq_Rabs :
+ forall x y : R,
+ Rabs x = Rabs y -> x = y \/ x = Ropp y.
+Proof.
+intros x y H.
+unfold Rabs in H.
+destruct (Rcase_abs x) as [_|_].
+assert (H' := f_equal Ropp H).
+rewrite Ropp_involutive in H'.
+rewrite H'.
+destruct (Rcase_abs y) as [_|_].
+left.
+apply Ropp_involutive.
+now right.
+rewrite H.
+now destruct (Rcase_abs y) as [_|_] ; [right|left].
+Qed.
+
+Theorem Rabs_minus_le:
+ forall x y : R,
+ (0 <= y)%R -> (y <= 2*x)%R ->
+ (Rabs (x-y) <= x)%R.
+Proof.
+intros x y Hx Hy.
+unfold Rabs; case (Rcase_abs (x - y)); intros H.
+apply Rplus_le_reg_l with x; ring_simplify; assumption.
+apply Rplus_le_reg_l with (-x)%R; ring_simplify.
+auto with real.
+Qed.
+
+Theorem Rplus_eq_reg_r :
+ forall r r1 r2 : R,
+ (r1 + r = r2 + r)%R -> (r1 = r2)%R.
+Proof.
+intros r r1 r2 H.
+apply Rplus_eq_reg_l with r.
+now rewrite 2!(Rplus_comm r).
+Qed.
+
+Theorem Rplus_le_reg_r :
+ forall r r1 r2 : R,
+ (r1 + r <= r2 + r)%R -> (r1 <= r2)%R.
+Proof.
+intros.
+apply Rplus_le_reg_l with r.
+now rewrite 2!(Rplus_comm r).
+Qed.
+
+Theorem Rmult_lt_reg_r :
+ forall r r1 r2 : R, (0 < r)%R ->
+ (r1 * r < r2 * r)%R -> (r1 < r2)%R.
+Proof.
+intros.
+apply Rmult_lt_reg_l with r.
+exact H.
+now rewrite 2!(Rmult_comm r).
+Qed.
+
+Theorem Rmult_le_reg_r :
+ forall r r1 r2 : R, (0 < r)%R ->
+ (r1 * r <= r2 * r)%R -> (r1 <= r2)%R.
+Proof.
+intros.
+apply Rmult_le_reg_l with r.
+exact H.
+now rewrite 2!(Rmult_comm r).
+Qed.
+
+Theorem Rmult_eq_reg_r :
+ forall r r1 r2 : R, (r1 * r)%R = (r2 * r)%R ->
+ r <> 0%R -> r1 = r2.
+Proof.
+intros r r1 r2 H1 H2.
+apply Rmult_eq_reg_l with r.
+now rewrite 2!(Rmult_comm r).
+exact H2.
+Qed.
+
+Theorem Rmult_minus_distr_r :
+ forall r r1 r2 : R,
+ ((r1 - r2) * r = r1 * r - r2 * r)%R.
+Proof.
+intros r r1 r2.
+rewrite <- 3!(Rmult_comm r).
+apply Rmult_minus_distr_l.
+Qed.
+
+Theorem Rmult_min_distr_r :
+ forall r r1 r2 : R,
+ (0 <= r)%R ->
+ (Rmin r1 r2 * r)%R = Rmin (r1 * r) (r2 * r).
+Proof.
+intros r r1 r2 [Hr|Hr].
+unfold Rmin.
+destruct (Rle_dec r1 r2) as [H1|H1] ;
+ destruct (Rle_dec (r1 * r) (r2 * r)) as [H2|H2] ;
+ try easy.
+apply (f_equal (fun x => Rmult x r)).
+apply Rle_antisym.
+exact H1.
+apply Rmult_le_reg_r with (1 := Hr).
+apply Rlt_le.
+now apply Rnot_le_lt.
+apply Rle_antisym.
+apply Rmult_le_compat_r.
+now apply Rlt_le.
+apply Rlt_le.
+now apply Rnot_le_lt.
+exact H2.
+rewrite <- Hr.
+rewrite 3!Rmult_0_r.
+unfold Rmin.
+destruct (Rle_dec 0 0) as [H0|H0].
+easy.
+elim H0.
+apply Rle_refl.
+Qed.
+
+Theorem Rmult_min_distr_l :
+ forall r r1 r2 : R,
+ (0 <= r)%R ->
+ (r * Rmin r1 r2)%R = Rmin (r * r1) (r * r2).
+Proof.
+intros r r1 r2 Hr.
+rewrite 3!(Rmult_comm r).
+now apply Rmult_min_distr_r.
+Qed.
+
+Theorem exp_le :
+ forall x y : R,
+ (x <= y)%R -> (exp x <= exp y)%R.
+Proof.
+intros x y [H|H].
+apply Rlt_le.
+now apply exp_increasing.
+rewrite H.
+apply Rle_refl.
+Qed.
+
+Theorem Rinv_lt :
+ forall x y,
+ (0 < x)%R -> (x < y)%R -> (/y < /x)%R.
+Proof.
+intros x y Hx Hxy.
+apply Rinv_lt_contravar.
+apply Rmult_lt_0_compat.
+exact Hx.
+now apply Rlt_trans with x.
+exact Hxy.
+Qed.
+
+Theorem Rinv_le :
+ forall x y,
+ (0 < x)%R -> (x <= y)%R -> (/y <= /x)%R.
+Proof.
+intros x y Hx Hxy.
+apply Rle_Rinv.
+exact Hx.
+now apply Rlt_le_trans with x.
+exact Hxy.
+Qed.
+
+Theorem sqrt_ge_0 :
+ forall x : R,
+ (0 <= sqrt x)%R.
+Proof.
+intros x.
+unfold sqrt.
+destruct (Rcase_abs x) as [_|H].
+apply Rle_refl.
+apply Rsqrt_positivity.
+Qed.
+
+Theorem Rabs_le :
+ forall x y,
+ (-y <= x <= y)%R -> (Rabs x <= y)%R.
+Proof.
+intros x y (Hyx,Hxy).
+unfold Rabs.
+case Rcase_abs ; intros Hx.
+apply Ropp_le_cancel.
+now rewrite Ropp_involutive.
+exact Hxy.
+Qed.
+
+Theorem Rabs_le_inv :
+ forall x y,
+ (Rabs x <= y)%R -> (-y <= x <= y)%R.
+Proof.
+intros x y Hxy.
+split.
+apply Rle_trans with (- Rabs x)%R.
+now apply Ropp_le_contravar.
+apply Ropp_le_cancel.
+rewrite Ropp_involutive, <- Rabs_Ropp.
+apply RRle_abs.
+apply Rle_trans with (2 := Hxy).
+apply RRle_abs.
+Qed.
+
+Theorem Rabs_ge :
+ forall x y,
+ (y <= -x \/ x <= y)%R -> (x <= Rabs y)%R.
+Proof.
+intros x y [Hyx|Hxy].
+apply Rle_trans with (-y)%R.
+apply Ropp_le_cancel.
+now rewrite Ropp_involutive.
+rewrite <- Rabs_Ropp.
+apply RRle_abs.
+apply Rle_trans with (1 := Hxy).
+apply RRle_abs.
+Qed.
+
+Theorem Rabs_ge_inv :
+ forall x y,
+ (x <= Rabs y)%R -> (y <= -x \/ x <= y)%R.
+Proof.
+intros x y.
+unfold Rabs.
+case Rcase_abs ; intros Hy Hxy.
+left.
+apply Ropp_le_cancel.
+now rewrite Ropp_involutive.
+now right.
+Qed.
+
+Theorem Rabs_lt :
+ forall x y,
+ (-y < x < y)%R -> (Rabs x < y)%R.
+Proof.
+intros x y (Hyx,Hxy).
+now apply Rabs_def1.
+Qed.
+
+Theorem Rabs_lt_inv :
+ forall x y,
+ (Rabs x < y)%R -> (-y < x < y)%R.
+Proof.
+intros x y H.
+now split ; eapply Rabs_def2.
+Qed.
+
+Theorem Rabs_gt :
+ forall x y,
+ (y < -x \/ x < y)%R -> (x < Rabs y)%R.
+Proof.
+intros x y [Hyx|Hxy].
+rewrite <- Rabs_Ropp.
+apply Rlt_le_trans with (Ropp y).
+apply Ropp_lt_cancel.
+now rewrite Ropp_involutive.
+apply RRle_abs.
+apply Rlt_le_trans with (1 := Hxy).
+apply RRle_abs.
+Qed.
+
+Theorem Rabs_gt_inv :
+ forall x y,
+ (x < Rabs y)%R -> (y < -x \/ x < y)%R.
+Proof.
+intros x y.
+unfold Rabs.
+case Rcase_abs ; intros Hy Hxy.
+left.
+apply Ropp_lt_cancel.
+now rewrite Ropp_involutive.
+now right.
+Qed.
+
+End Rmissing.
+
+Section Z2R.
+
+(** Z2R function (Z -> R) *)
+Fixpoint P2R (p : positive) :=
+ match p with
+ | xH => 1%R
+ | xO xH => 2%R
+ | xO t => (2 * P2R t)%R
+ | xI xH => 3%R
+ | xI t => (1 + 2 * P2R t)%R
+ end.
+
+Definition Z2R n :=
+ match n with
+ | Zpos p => P2R p
+ | Zneg p => Ropp (P2R p)
+ | Z0 => R0
+ end.
+
+Theorem P2R_INR :
+ forall n, P2R n = INR (nat_of_P n).
+Proof.
+induction n ; simpl ; try (
+ rewrite IHn ;
+ rewrite <- (mult_INR 2) ;
+ rewrite <- (nat_of_P_mult_morphism 2) ;
+ change (2 * n)%positive with (xO n)).
+(* xI *)
+rewrite (Rplus_comm 1).
+change (nat_of_P (xO n)) with (Pmult_nat n 2).
+case n ; intros ; simpl ; try apply refl_equal.
+case (Pmult_nat p 4) ; intros ; try apply refl_equal.
+rewrite Rplus_0_l.
+apply refl_equal.
+apply Rplus_comm.
+(* xO *)
+case n ; intros ; apply refl_equal.
+(* xH *)
+apply refl_equal.
+Qed.
+
+Theorem Z2R_IZR :
+ forall n, Z2R n = IZR n.
+Proof.
+intro.
+case n ; intros ; simpl.
+apply refl_equal.
+apply P2R_INR.
+apply Ropp_eq_compat.
+apply P2R_INR.
+Qed.
+
+Theorem Z2R_opp :
+ forall n, Z2R (-n) = (- Z2R n)%R.
+Proof.
+intros.
+repeat rewrite Z2R_IZR.
+apply Ropp_Ropp_IZR.
+Qed.
+
+Theorem Z2R_plus :
+ forall m n, (Z2R (m + n) = Z2R m + Z2R n)%R.
+Proof.
+intros.
+repeat rewrite Z2R_IZR.
+apply plus_IZR.
+Qed.
+
+Theorem minus_IZR :
+ forall n m : Z,
+ IZR (n - m) = (IZR n - IZR m)%R.
+Proof.
+intros.
+unfold Zminus.
+rewrite plus_IZR.
+rewrite Ropp_Ropp_IZR.
+apply refl_equal.
+Qed.
+
+Theorem Z2R_minus :
+ forall m n, (Z2R (m - n) = Z2R m - Z2R n)%R.
+Proof.
+intros.
+repeat rewrite Z2R_IZR.
+apply minus_IZR.
+Qed.
+
+Theorem Z2R_mult :
+ forall m n, (Z2R (m * n) = Z2R m * Z2R n)%R.
+Proof.
+intros.
+repeat rewrite Z2R_IZR.
+apply mult_IZR.
+Qed.
+
+Theorem le_Z2R :
+ forall m n, (Z2R m <= Z2R n)%R -> (m <= n)%Z.
+Proof.
+intros m n.
+repeat rewrite Z2R_IZR.
+apply le_IZR.
+Qed.
+
+Theorem Z2R_le :
+ forall m n, (m <= n)%Z -> (Z2R m <= Z2R n)%R.
+Proof.
+intros m n.
+repeat rewrite Z2R_IZR.
+apply IZR_le.
+Qed.
+
+Theorem lt_Z2R :
+ forall m n, (Z2R m < Z2R n)%R -> (m < n)%Z.
+Proof.
+intros m n.
+repeat rewrite Z2R_IZR.
+apply lt_IZR.
+Qed.
+
+Theorem Z2R_lt :
+ forall m n, (m < n)%Z -> (Z2R m < Z2R n)%R.
+Proof.
+intros m n.
+repeat rewrite Z2R_IZR.
+apply IZR_lt.
+Qed.
+
+Theorem Z2R_le_lt :
+ forall m n p, (m <= n < p)%Z -> (Z2R m <= Z2R n < Z2R p)%R.
+Proof.
+intros m n p (H1, H2).
+split.
+now apply Z2R_le.
+now apply Z2R_lt.
+Qed.
+
+Theorem le_lt_Z2R :
+ forall m n p, (Z2R m <= Z2R n < Z2R p)%R -> (m <= n < p)%Z.
+Proof.
+intros m n p (H1, H2).
+split.
+now apply le_Z2R.
+now apply lt_Z2R.
+Qed.
+
+Theorem eq_Z2R :
+ forall m n, (Z2R m = Z2R n)%R -> (m = n)%Z.
+Proof.
+intros m n H.
+apply eq_IZR.
+now rewrite <- 2!Z2R_IZR.
+Qed.
+
+Theorem neq_Z2R :
+ forall m n, (Z2R m <> Z2R n)%R -> (m <> n)%Z.
+Proof.
+intros m n H H'.
+apply H.
+now apply f_equal.
+Qed.
+
+Theorem Z2R_neq :
+ forall m n, (m <> n)%Z -> (Z2R m <> Z2R n)%R.
+Proof.
+intros m n.
+repeat rewrite Z2R_IZR.
+apply IZR_neq.
+Qed.
+
+Theorem Z2R_abs :
+ forall z, Z2R (Zabs z) = Rabs (Z2R z).
+Proof.
+intros.
+repeat rewrite Z2R_IZR.
+now rewrite Rabs_Zabs.
+Qed.
+
+End Z2R.
+
+(** Decidable comparison on reals *)
+Section Rcompare.
+
+Definition Rcompare x y :=
+ match total_order_T x y with
+ | inleft (left _) => Lt
+ | inleft (right _) => Eq
+ | inright _ => Gt
+ end.
+
+Inductive Rcompare_prop (x y : R) : comparison -> Prop :=
+ | Rcompare_Lt_ : (x < y)%R -> Rcompare_prop x y Lt
+ | Rcompare_Eq_ : x = y -> Rcompare_prop x y Eq
+ | Rcompare_Gt_ : (y < x)%R -> Rcompare_prop x y Gt.
+
+Theorem Rcompare_spec :
+ forall x y, Rcompare_prop x y (Rcompare x y).
+Proof.
+intros x y.
+unfold Rcompare.
+now destruct (total_order_T x y) as [[H|H]|H] ; constructor.
+Qed.
+
+Global Opaque Rcompare.
+
+Theorem Rcompare_Lt :
+ forall x y,
+ (x < y)%R -> Rcompare x y = Lt.
+Proof.
+intros x y H.
+case Rcompare_spec ; intro H'.
+easy.
+rewrite H' in H.
+elim (Rlt_irrefl _ H).
+elim (Rlt_irrefl x).
+now apply Rlt_trans with y.
+Qed.
+
+Theorem Rcompare_Lt_inv :
+ forall x y,
+ Rcompare x y = Lt -> (x < y)%R.
+Proof.
+intros x y.
+now case Rcompare_spec.
+Qed.
+
+Theorem Rcompare_not_Lt :
+ forall x y,
+ (y <= x)%R -> Rcompare x y <> Lt.
+Proof.
+intros x y H1 H2.
+apply Rle_not_lt with (1 := H1).
+now apply Rcompare_Lt_inv.
+Qed.
+
+Theorem Rcompare_not_Lt_inv :
+ forall x y,
+ Rcompare x y <> Lt -> (y <= x)%R.
+Proof.
+intros x y H.
+apply Rnot_lt_le.
+contradict H.
+now apply Rcompare_Lt.
+Qed.
+
+Theorem Rcompare_Eq :
+ forall x y,
+ x = y -> Rcompare x y = Eq.
+Proof.
+intros x y H.
+rewrite H.
+now case Rcompare_spec ; intro H' ; try elim (Rlt_irrefl _ H').
+Qed.
+
+Theorem Rcompare_Eq_inv :
+ forall x y,
+ Rcompare x y = Eq -> x = y.
+Proof.
+intros x y.
+now case Rcompare_spec.
+Qed.
+
+Theorem Rcompare_Gt :
+ forall x y,
+ (y < x)%R -> Rcompare x y = Gt.
+Proof.
+intros x y H.
+case Rcompare_spec ; intro H'.
+elim (Rlt_irrefl x).
+now apply Rlt_trans with y.
+rewrite H' in H.
+elim (Rlt_irrefl _ H).
+easy.
+Qed.
+
+Theorem Rcompare_Gt_inv :
+ forall x y,
+ Rcompare x y = Gt -> (y < x)%R.
+Proof.
+intros x y.
+now case Rcompare_spec.
+Qed.
+
+Theorem Rcompare_not_Gt :
+ forall x y,
+ (x <= y)%R -> Rcompare x y <> Gt.
+Proof.
+intros x y H1 H2.
+apply Rle_not_lt with (1 := H1).
+now apply Rcompare_Gt_inv.
+Qed.
+
+Theorem Rcompare_not_Gt_inv :
+ forall x y,
+ Rcompare x y <> Gt -> (x <= y)%R.
+Proof.
+intros x y H.
+apply Rnot_lt_le.
+contradict H.
+now apply Rcompare_Gt.
+Qed.
+
+Theorem Rcompare_Z2R :
+ forall x y, Rcompare (Z2R x) (Z2R y) = Zcompare x y.
+Proof.
+intros x y.
+case Rcompare_spec ; intros H ; apply sym_eq.
+apply Zcompare_Lt.
+now apply lt_Z2R.
+apply Zcompare_Eq.
+now apply eq_Z2R.
+apply Zcompare_Gt.
+now apply lt_Z2R.
+Qed.
+
+Theorem Rcompare_sym :
+ forall x y,
+ Rcompare x y = CompOpp (Rcompare y x).
+Proof.
+intros x y.
+destruct (Rcompare_spec y x) as [H|H|H].
+now apply Rcompare_Gt.
+now apply Rcompare_Eq.
+now apply Rcompare_Lt.
+Qed.
+
+Theorem Rcompare_plus_r :
+ forall z x y,
+ Rcompare (x + z) (y + z) = Rcompare x y.
+Proof.
+intros z x y.
+destruct (Rcompare_spec x y) as [H|H|H].
+apply Rcompare_Lt.
+now apply Rplus_lt_compat_r.
+apply Rcompare_Eq.
+now rewrite H.
+apply Rcompare_Gt.
+now apply Rplus_lt_compat_r.
+Qed.
+
+Theorem Rcompare_plus_l :
+ forall z x y,
+ Rcompare (z + x) (z + y) = Rcompare x y.
+Proof.
+intros z x y.
+rewrite 2!(Rplus_comm z).
+apply Rcompare_plus_r.
+Qed.
+
+Theorem Rcompare_mult_r :
+ forall z x y,
+ (0 < z)%R ->
+ Rcompare (x * z) (y * z) = Rcompare x y.
+Proof.
+intros z x y Hz.
+destruct (Rcompare_spec x y) as [H|H|H].
+apply Rcompare_Lt.
+now apply Rmult_lt_compat_r.
+apply Rcompare_Eq.
+now rewrite H.
+apply Rcompare_Gt.
+now apply Rmult_lt_compat_r.
+Qed.
+
+Theorem Rcompare_mult_l :
+ forall z x y,
+ (0 < z)%R ->
+ Rcompare (z * x) (z * y) = Rcompare x y.
+Proof.
+intros z x y.
+rewrite 2!(Rmult_comm z).
+apply Rcompare_mult_r.
+Qed.
+
+Theorem Rcompare_middle :
+ forall x d u,
+ Rcompare (x - d) (u - x) = Rcompare x ((d + u) / 2).
+Proof.
+intros x d u.
+rewrite <- (Rcompare_plus_r (- x / 2 - d / 2) x).
+rewrite <- (Rcompare_mult_r (/2) (x - d)).
+field_simplify (x + (- x / 2 - d / 2))%R.
+now field_simplify ((d + u) / 2 + (- x / 2 - d / 2))%R.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+Qed.
+
+Theorem Rcompare_half_l :
+ forall x y, Rcompare (x / 2) y = Rcompare x (2 * y).
+Proof.
+intros x y.
+rewrite <- (Rcompare_mult_r 2%R).
+unfold Rdiv.
+rewrite Rmult_assoc, Rinv_l, Rmult_1_r.
+now rewrite Rmult_comm.
+now apply (Z2R_neq 2 0).
+now apply (Z2R_lt 0 2).
+Qed.
+
+Theorem Rcompare_half_r :
+ forall x y, Rcompare x (y / 2) = Rcompare (2 * x) y.
+Proof.
+intros x y.
+rewrite <- (Rcompare_mult_r 2%R).
+unfold Rdiv.
+rewrite Rmult_assoc, Rinv_l, Rmult_1_r.
+now rewrite Rmult_comm.
+now apply (Z2R_neq 2 0).
+now apply (Z2R_lt 0 2).
+Qed.
+
+Theorem Rcompare_sqr :
+ forall x y,
+ (0 <= x)%R -> (0 <= y)%R ->
+ Rcompare (x * x) (y * y) = Rcompare x y.
+Proof.
+intros x y Hx Hy.
+destruct (Rcompare_spec x y) as [H|H|H].
+apply Rcompare_Lt.
+now apply Rsqr_incrst_1.
+rewrite H.
+now apply Rcompare_Eq.
+apply Rcompare_Gt.
+now apply Rsqr_incrst_1.
+Qed.
+
+Theorem Rmin_compare :
+ forall x y,
+ Rmin x y = match Rcompare x y with Lt => x | Eq => x | Gt => y end.
+Proof.
+intros x y.
+unfold Rmin.
+destruct (Rle_dec x y) as [[Hx|Hx]|Hx].
+now rewrite Rcompare_Lt.
+now rewrite Rcompare_Eq.
+rewrite Rcompare_Gt.
+easy.
+now apply Rnot_le_lt.
+Qed.
+
+End Rcompare.
+
+Section Rle_bool.
+
+Definition Rle_bool x y :=
+ match Rcompare x y with
+ | Gt => false
+ | _ => true
+ end.
+
+Inductive Rle_bool_prop (x y : R) : bool -> Prop :=
+ | Rle_bool_true_ : (x <= y)%R -> Rle_bool_prop x y true
+ | Rle_bool_false_ : (y < x)%R -> Rle_bool_prop x y false.
+
+Theorem Rle_bool_spec :
+ forall x y, Rle_bool_prop x y (Rle_bool x y).
+Proof.
+intros x y.
+unfold Rle_bool.
+case Rcompare_spec ; constructor.
+now apply Rlt_le.
+rewrite H.
+apply Rle_refl.
+exact H.
+Qed.
+
+Theorem Rle_bool_true :
+ forall x y,
+ (x <= y)%R -> Rle_bool x y = true.
+Proof.
+intros x y Hxy.
+case Rle_bool_spec ; intros H.
+apply refl_equal.
+elim (Rlt_irrefl x).
+now apply Rle_lt_trans with y.
+Qed.
+
+Theorem Rle_bool_false :
+ forall x y,
+ (y < x)%R -> Rle_bool x y = false.
+Proof.
+intros x y Hxy.
+case Rle_bool_spec ; intros H.
+elim (Rlt_irrefl x).
+now apply Rle_lt_trans with y.
+apply refl_equal.
+Qed.
+
+End Rle_bool.
+
+Section Rlt_bool.
+
+Definition Rlt_bool x y :=
+ match Rcompare x y with
+ | Lt => true
+ | _ => false
+ end.
+
+Inductive Rlt_bool_prop (x y : R) : bool -> Prop :=
+ | Rlt_bool_true_ : (x < y)%R -> Rlt_bool_prop x y true
+ | Rlt_bool_false_ : (y <= x)%R -> Rlt_bool_prop x y false.
+
+Theorem Rlt_bool_spec :
+ forall x y, Rlt_bool_prop x y (Rlt_bool x y).
+Proof.
+intros x y.
+unfold Rlt_bool.
+case Rcompare_spec ; constructor.
+exact H.
+rewrite H.
+apply Rle_refl.
+now apply Rlt_le.
+Qed.
+
+Theorem negb_Rlt_bool :
+ forall x y,
+ negb (Rle_bool x y) = Rlt_bool y x.
+Proof.
+intros x y.
+unfold Rlt_bool, Rle_bool.
+rewrite Rcompare_sym.
+now case Rcompare.
+Qed.
+
+Theorem negb_Rle_bool :
+ forall x y,
+ negb (Rlt_bool x y) = Rle_bool y x.
+Proof.
+intros x y.
+unfold Rlt_bool, Rle_bool.
+rewrite Rcompare_sym.
+now case Rcompare.
+Qed.
+
+Theorem Rlt_bool_true :
+ forall x y,
+ (x < y)%R -> Rlt_bool x y = true.
+Proof.
+intros x y Hxy.
+rewrite <- negb_Rlt_bool.
+now rewrite Rle_bool_false.
+Qed.
+
+Theorem Rlt_bool_false :
+ forall x y,
+ (y <= x)%R -> Rlt_bool x y = false.
+Proof.
+intros x y Hxy.
+rewrite <- negb_Rlt_bool.
+now rewrite Rle_bool_true.
+Qed.
+
+End Rlt_bool.
+
+Section Req_bool.
+
+Definition Req_bool x y :=
+ match Rcompare x y with
+ | Eq => true
+ | _ => false
+ end.
+
+Inductive Req_bool_prop (x y : R) : bool -> Prop :=
+ | Req_bool_true_ : (x = y)%R -> Req_bool_prop x y true
+ | Req_bool_false_ : (x <> y)%R -> Req_bool_prop x y false.
+
+Theorem Req_bool_spec :
+ forall x y, Req_bool_prop x y (Req_bool x y).
+Proof.
+intros x y.
+unfold Req_bool.
+case Rcompare_spec ; constructor.
+now apply Rlt_not_eq.
+easy.
+now apply Rgt_not_eq.
+Qed.
+
+Theorem Req_bool_true :
+ forall x y,
+ (x = y)%R -> Req_bool x y = true.
+Proof.
+intros x y Hxy.
+case Req_bool_spec ; intros H.
+apply refl_equal.
+contradict H.
+exact Hxy.
+Qed.
+
+Theorem Req_bool_false :
+ forall x y,
+ (x <> y)%R -> Req_bool x y = false.
+Proof.
+intros x y Hxy.
+case Req_bool_spec ; intros H.
+contradict Hxy.
+exact H.
+apply refl_equal.
+Qed.
+
+End Req_bool.
+
+Section Floor_Ceil.
+
+(** Zfloor and Zceil *)
+Definition Zfloor (x : R) := (up x - 1)%Z.
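+
+(* Illustration: Zfloor rounds toward minus infinity, e.g.
+   Zfloor (5/2) = 2 and Zfloor (-5/2) = -3. *)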
+
+Theorem Zfloor_lb :
+ forall x : R,
+ (Z2R (Zfloor x) <= x)%R.
+Proof.
+intros x.
+unfold Zfloor.
+rewrite Z2R_minus.
+simpl.
+rewrite Z2R_IZR.
+apply Rplus_le_reg_r with (1 - x)%R.
+ring_simplify.
+exact (proj2 (archimed x)).
+Qed.
+
+Theorem Zfloor_ub :
+ forall x : R,
+ (x < Z2R (Zfloor x) + 1)%R.
+Proof.
+intros x.
+unfold Zfloor.
+rewrite Z2R_minus.
+unfold Rminus.
+rewrite Rplus_assoc.
+rewrite Rplus_opp_l, Rplus_0_r.
+rewrite Z2R_IZR.
+exact (proj1 (archimed x)).
+Qed.
+
+Theorem Zfloor_lub :
+ forall n x,
+ (Z2R n <= x)%R ->
+ (n <= Zfloor x)%Z.
+Proof.
+intros n x Hnx.
+apply Zlt_succ_le.
+apply lt_Z2R.
+apply Rle_lt_trans with (1 := Hnx).
+unfold Zsucc.
+rewrite Z2R_plus.
+apply Zfloor_ub.
+Qed.
+
+Theorem Zfloor_imp :
+ forall n x,
+ (Z2R n <= x < Z2R (n + 1))%R ->
+ Zfloor x = n.
+Proof.
+intros n x Hnx.
+apply Zle_antisym.
+apply Zlt_succ_le.
+apply lt_Z2R.
+apply Rle_lt_trans with (2 := proj2 Hnx).
+apply Zfloor_lb.
+now apply Zfloor_lub.
+Qed.
+
+Theorem Zfloor_Z2R :
+ forall n,
+ Zfloor (Z2R n) = n.
+Proof.
+intros n.
+apply Zfloor_imp.
+split.
+apply Rle_refl.
+apply Z2R_lt.
+apply Zlt_succ.
+Qed.
+
+Theorem Zfloor_le :
+ forall x y, (x <= y)%R ->
+ (Zfloor x <= Zfloor y)%Z.
+Proof.
+intros x y Hxy.
+apply Zfloor_lub.
+apply Rle_trans with (2 := Hxy).
+apply Zfloor_lb.
+Qed.
+
+Definition Zceil (x : R) := (- Zfloor (- x))%Z.
+
+Theorem Zceil_ub :
+ forall x : R,
+ (x <= Z2R (Zceil x))%R.
+Proof.
+intros x.
+unfold Zceil.
+rewrite Z2R_opp.
+apply Ropp_le_cancel.
+rewrite Ropp_involutive.
+apply Zfloor_lb.
+Qed.
+
+Theorem Zceil_glb :
+ forall n x,
+ (x <= Z2R n)%R ->
+ (Zceil x <= n)%Z.
+Proof.
+intros n x Hnx.
+unfold Zceil.
+apply Zopp_le_cancel.
+rewrite Zopp_involutive.
+apply Zfloor_lub.
+rewrite Z2R_opp.
+now apply Ropp_le_contravar.
+Qed.
+
+Theorem Zceil_imp :
+ forall n x,
+ (Z2R (n - 1) < x <= Z2R n)%R ->
+ Zceil x = n.
+Proof.
+intros n x Hnx.
+unfold Zceil.
+rewrite <- (Zopp_involutive n).
+apply f_equal.
+apply Zfloor_imp.
+split.
+rewrite Z2R_opp.
+now apply Ropp_le_contravar.
+rewrite <- (Zopp_involutive 1).
+rewrite <- Zopp_plus_distr.
+rewrite Z2R_opp.
+now apply Ropp_lt_contravar.
+Qed.
+
+Theorem Zceil_Z2R :
+ forall n,
+ Zceil (Z2R n) = n.
+Proof.
+intros n.
+unfold Zceil.
+rewrite <- Z2R_opp, Zfloor_Z2R.
+apply Zopp_involutive.
+Qed.
+
+Theorem Zceil_le :
+ forall x y, (x <= y)%R ->
+ (Zceil x <= Zceil y)%Z.
+Proof.
+intros x y Hxy.
+apply Zceil_glb.
+apply Rle_trans with (1 := Hxy).
+apply Zceil_ub.
+Qed.
+
+Theorem Zceil_floor_neq :
+ forall x : R,
+ (Z2R (Zfloor x) <> x)%R ->
+ (Zceil x = Zfloor x + 1)%Z.
+Proof.
+intros x Hx.
+apply Zceil_imp.
+split.
+ring_simplify (Zfloor x + 1 - 1)%Z.
+apply Rnot_le_lt.
+intros H.
+apply Hx.
+apply Rle_antisym.
+apply Zfloor_lb.
+exact H.
+apply Rlt_le.
+rewrite Z2R_plus.
+apply Zfloor_ub.
+Qed.
+
+Definition Ztrunc x := if Rlt_bool x 0 then Zceil x else Zfloor x.
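+
+(* Illustration: Ztrunc rounds toward zero, e.g. Ztrunc (5/2) = 2
+   and Ztrunc (-5/2) = -2. *)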
+
+Theorem Ztrunc_Z2R :
+ forall n,
+ Ztrunc (Z2R n) = n.
+Proof.
+intros n.
+unfold Ztrunc.
+case Rlt_bool_spec ; intro H.
+apply Zceil_Z2R.
+apply Zfloor_Z2R.
+Qed.
+
+Theorem Ztrunc_floor :
+ forall x,
+ (0 <= x)%R ->
+ Ztrunc x = Zfloor x.
+Proof.
+intros x Hx.
+unfold Ztrunc.
+case Rlt_bool_spec ; intro H.
+elim Rlt_irrefl with x.
+now apply Rlt_le_trans with R0.
+apply refl_equal.
+Qed.
+
+Theorem Ztrunc_ceil :
+ forall x,
+ (x <= 0)%R ->
+ Ztrunc x = Zceil x.
+Proof.
+intros x Hx.
+unfold Ztrunc.
+case Rlt_bool_spec ; intro H.
+apply refl_equal.
+rewrite (Rle_antisym _ _ Hx H).
+fold (Z2R 0).
+rewrite Zceil_Z2R.
+apply Zfloor_Z2R.
+Qed.
+
+Theorem Ztrunc_le :
+ forall x y, (x <= y)%R ->
+ (Ztrunc x <= Ztrunc y)%Z.
+Proof.
+intros x y Hxy.
+unfold Ztrunc at 1.
+case Rlt_bool_spec ; intro Hx.
+unfold Ztrunc.
+case Rlt_bool_spec ; intro Hy.
+now apply Zceil_le.
+apply Zle_trans with 0%Z.
+apply Zceil_glb.
+now apply Rlt_le.
+now apply Zfloor_lub.
+rewrite Ztrunc_floor.
+now apply Zfloor_le.
+now apply Rle_trans with x.
+Qed.
+
+Theorem Ztrunc_opp :
+ forall x,
+ Ztrunc (- x) = Zopp (Ztrunc x).
+Proof.
+intros x.
+unfold Ztrunc at 2.
+case Rlt_bool_spec ; intros Hx.
+rewrite Ztrunc_floor.
+apply sym_eq.
+apply Zopp_involutive.
+rewrite <- Ropp_0.
+apply Ropp_le_contravar.
+now apply Rlt_le.
+rewrite Ztrunc_ceil.
+unfold Zceil.
+now rewrite Ropp_involutive.
+rewrite <- Ropp_0.
+now apply Ropp_le_contravar.
+Qed.
+
+Theorem Ztrunc_abs :
+ forall x,
+ Ztrunc (Rabs x) = Zabs (Ztrunc x).
+Proof.
+intros x.
+rewrite Ztrunc_floor. 2: apply Rabs_pos.
+unfold Ztrunc.
+case Rlt_bool_spec ; intro H.
+rewrite Rabs_left with (1 := H).
+rewrite Zabs_non_eq.
+apply sym_eq.
+apply Zopp_involutive.
+apply Zceil_glb.
+now apply Rlt_le.
+rewrite Rabs_pos_eq with (1 := H).
+apply sym_eq.
+apply Zabs_eq.
+now apply Zfloor_lub.
+Qed.
+
+Theorem Ztrunc_lub :
+ forall n x,
+ (Z2R n <= Rabs x)%R ->
+ (n <= Zabs (Ztrunc x))%Z.
+Proof.
+intros n x H.
+rewrite <- Ztrunc_abs.
+rewrite Ztrunc_floor. 2: apply Rabs_pos.
+now apply Zfloor_lub.
+Qed.
+
+Definition Zaway x := if Rlt_bool x 0 then Zfloor x else Zceil x.
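+
+(* Illustration: Zaway rounds away from zero, e.g. Zaway (5/2) = 3
+   and Zaway (-5/2) = -3. *)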
+
+Theorem Zaway_Z2R :
+ forall n,
+ Zaway (Z2R n) = n.
+Proof.
+intros n.
+unfold Zaway.
+case Rlt_bool_spec ; intro H.
+apply Zfloor_Z2R.
+apply Zceil_Z2R.
+Qed.
+
+Theorem Zaway_ceil :
+ forall x,
+ (0 <= x)%R ->
+ Zaway x = Zceil x.
+Proof.
+intros x Hx.
+unfold Zaway.
+case Rlt_bool_spec ; intro H.
+elim Rlt_irrefl with x.
+now apply Rlt_le_trans with R0.
+apply refl_equal.
+Qed.
+
+Theorem Zaway_floor :
+ forall x,
+ (x <= 0)%R ->
+ Zaway x = Zfloor x.
+Proof.
+intros x Hx.
+unfold Zaway.
+case Rlt_bool_spec ; intro H.
+apply refl_equal.
+rewrite (Rle_antisym _ _ Hx H).
+fold (Z2R 0).
+rewrite Zfloor_Z2R.
+apply Zceil_Z2R.
+Qed.
+
+Theorem Zaway_le :
+ forall x y, (x <= y)%R ->
+ (Zaway x <= Zaway y)%Z.
+Proof.
+intros x y Hxy.
+unfold Zaway at 1.
+case Rlt_bool_spec ; intro Hx.
+unfold Zaway.
+case Rlt_bool_spec ; intro Hy.
+now apply Zfloor_le.
+apply le_Z2R.
+apply Rle_trans with 0%R.
+apply Rlt_le.
+apply Rle_lt_trans with (2 := Hx).
+apply Zfloor_lb.
+apply Rle_trans with (1 := Hy).
+apply Zceil_ub.
+rewrite Zaway_ceil.
+now apply Zceil_le.
+now apply Rle_trans with x.
+Qed.
+
+Theorem Zaway_opp :
+ forall x,
+ Zaway (- x) = Zopp (Zaway x).
+Proof.
+intros x.
+unfold Zaway at 2.
+case Rlt_bool_spec ; intro H.
+rewrite Zaway_ceil.
+unfold Zceil.
+now rewrite Ropp_involutive.
+apply Rlt_le.
+now apply Ropp_0_gt_lt_contravar.
+rewrite Zaway_floor.
+apply sym_eq.
+apply Zopp_involutive.
+rewrite <- Ropp_0.
+now apply Ropp_le_contravar.
+Qed.
+
+Theorem Zaway_abs :
+ forall x,
+ Zaway (Rabs x) = Zabs (Zaway x).
+Proof.
+intros x.
+rewrite Zaway_ceil. 2: apply Rabs_pos.
+unfold Zaway.
+case Rlt_bool_spec ; intro H.
+rewrite Rabs_left with (1 := H).
+rewrite Zabs_non_eq.
+apply (f_equal (fun v => - Zfloor v)%Z).
+apply Ropp_involutive.
+apply le_Z2R.
+apply Rlt_le.
+apply Rle_lt_trans with (2 := H).
+apply Zfloor_lb.
+rewrite Rabs_pos_eq with (1 := H).
+apply sym_eq.
+apply Zabs_eq.
+apply le_Z2R.
+apply Rle_trans with (1 := H).
+apply Zceil_ub.
+Qed.
+
+End Floor_Ceil.
+
+Section Zdiv_Rdiv.
+
+Theorem Zfloor_div :
+ forall x y,
+ y <> Z0 ->
+ Zfloor (Z2R x / Z2R y) = (x / y)%Z.
+Proof.
+intros x y Zy.
+generalize (Z_div_mod_eq_full x y Zy).
+intros Hx.
+rewrite Hx at 1.
+assert (Zy': Z2R y <> R0).
+contradict Zy.
+now apply eq_Z2R.
+unfold Rdiv.
+rewrite Z2R_plus, Rmult_plus_distr_r, Z2R_mult.
+replace (Z2R y * Z2R (x / y) * / Z2R y)%R with (Z2R (x / y)) by now field.
+apply Zfloor_imp.
+rewrite Z2R_plus.
+assert (0 <= Z2R (x mod y) * / Z2R y < 1)%R.
+(* *)
+assert (forall x' y', (0 < y')%Z -> 0 <= Z2R (x' mod y') * / Z2R y' < 1)%R.
+(* . *)
+clear.
+intros x y Hy.
+split.
+apply Rmult_le_pos.
+apply (Z2R_le 0).
+refine (proj1 (Z_mod_lt _ _ _)).
+now apply Zlt_gt.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0).
+apply Rmult_lt_reg_r with (Z2R y).
+now apply (Z2R_lt 0).
+rewrite Rmult_1_l, Rmult_assoc, Rinv_l, Rmult_1_r.
+apply Z2R_lt.
+eapply Z_mod_lt.
+now apply Zlt_gt.
+apply Rgt_not_eq.
+now apply (Z2R_lt 0).
+(* . *)
+destruct (Z_lt_le_dec y 0) as [Hy|Hy].
+rewrite <- Rmult_opp_opp.
+rewrite Ropp_inv_permute with (1 := Zy').
+rewrite <- 2!Z2R_opp.
+rewrite <- Zmod_opp_opp.
+apply H.
+clear -Hy. omega.
+apply H.
+clear -Zy Hy. omega.
+(* *)
+split.
+pattern (Z2R (x / y)) at 1 ; rewrite <- Rplus_0_r.
+apply Rplus_le_compat_l.
+apply H.
+apply Rplus_lt_compat_l.
+apply H.
+Qed.
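+
+(* Illustration: the statement is consistent with Coq's Zdiv rounding
+   toward minus infinity, e.g. ((-7) / 2)%Z = -4, which matches Zfloor
+   applied to the real quotient -3.5. *)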
+
+End Zdiv_Rdiv.
+
+Section pow.
+
+Variable r : radix.
+
+Theorem radix_pos : (0 < Z2R r)%R.
+Proof.
+destruct r as (v, Hr). simpl.
+apply (Z2R_lt 0).
+apply Zlt_le_trans with 2%Z.
+easy.
+now apply Zle_bool_imp_le.
+Qed.
+
+(** The function bpow computes the power function #&beta;#^e for an integer exponent e *)
+Definition bpow e :=
+ match e with
+ | Zpos p => Z2R (Zpower_pos r p)
+ | Zneg p => Rinv (Z2R (Zpower_pos r p))
+ | Z0 => R1
+ end.
+
+Theorem Z2R_Zpower_pos :
+ forall n m,
+ Z2R (Zpower_pos n m) = powerRZ (Z2R n) (Zpos m).
+Proof.
+intros.
+rewrite Zpower_pos_nat.
+simpl.
+induction (nat_of_P m).
+apply refl_equal.
+unfold Zpower_nat.
+simpl.
+rewrite Z2R_mult.
+apply Rmult_eq_compat_l.
+exact IHn0.
+Qed.
+
+Theorem bpow_powerRZ :
+ forall e,
+ bpow e = powerRZ (Z2R r) e.
+Proof.
+destruct e ; unfold bpow.
+reflexivity.
+now rewrite Z2R_Zpower_pos.
+now rewrite Z2R_Zpower_pos.
+Qed.
+
+Theorem bpow_ge_0 :
+ forall e : Z, (0 <= bpow e)%R.
+Proof.
+intros.
+rewrite bpow_powerRZ.
+apply powerRZ_le.
+apply radix_pos.
+Qed.
+
+Theorem bpow_gt_0 :
+ forall e : Z, (0 < bpow e)%R.
+Proof.
+intros.
+rewrite bpow_powerRZ.
+apply powerRZ_lt.
+apply radix_pos.
+Qed.
+
+Theorem bpow_plus :
+ forall e1 e2 : Z, (bpow (e1 + e2) = bpow e1 * bpow e2)%R.
+Proof.
+intros.
+repeat rewrite bpow_powerRZ.
+apply powerRZ_add.
+apply Rgt_not_eq.
+apply radix_pos.
+Qed.
+
+Theorem bpow_1 :
+ bpow 1 = Z2R r.
+Proof.
+unfold bpow, Zpower_pos. simpl.
+now rewrite Zmult_1_r.
+Qed.
+
+Theorem bpow_plus1 :
+ forall e : Z, (bpow (e + 1) = Z2R r * bpow e)%R.
+Proof.
+intros.
+rewrite <- bpow_1.
+rewrite <- bpow_plus.
+now rewrite Zplus_comm.
+Qed.
+
+Theorem bpow_opp :
+ forall e : Z, (bpow (-e) = /bpow e)%R.
+Proof.
+intros e; destruct e.
+simpl; now rewrite Rinv_1.
+now replace (-Zpos p)%Z with (Zneg p) by reflexivity.
+replace (-Zneg p)%Z with (Zpos p) by reflexivity.
+simpl; rewrite Rinv_involutive; trivial.
+generalize (bpow_gt_0 (Zpos p)).
+simpl; auto with real.
+Qed.
+
+Theorem Z2R_Zpower_nat :
+ forall e : nat,
+ Z2R (Zpower_nat r e) = bpow (Z_of_nat e).
+Proof.
+intros [|e].
+split.
+rewrite <- nat_of_P_o_P_of_succ_nat_eq_succ.
+rewrite <- Zpower_pos_nat.
+now rewrite <- Zpos_eq_Z_of_nat_o_nat_of_P.
+Qed.
+
+Theorem Z2R_Zpower :
+ forall e : Z,
+ (0 <= e)%Z ->
+ Z2R (Zpower r e) = bpow e.
+Proof.
+intros [|e|e] H.
+split.
+split.
+now elim H.
+Qed.
+
+Theorem bpow_lt :
+ forall e1 e2 : Z,
+ (e1 < e2)%Z -> (bpow e1 < bpow e2)%R.
+Proof.
+intros e1 e2 H.
+replace e2 with (e1 + (e2 - e1))%Z by ring.
+rewrite <- (Rmult_1_r (bpow e1)).
+rewrite bpow_plus.
+apply Rmult_lt_compat_l.
+apply bpow_gt_0.
+assert (0 < e2 - e1)%Z by omega.
+destruct (e2 - e1)%Z ; try discriminate H0.
+clear.
+rewrite <- Z2R_Zpower by easy.
+apply (Z2R_lt 1).
+now apply Zpower_gt_1.
+Qed.
+
+Theorem lt_bpow :
+ forall e1 e2 : Z,
+ (bpow e1 < bpow e2)%R -> (e1 < e2)%Z.
+Proof.
+intros e1 e2 H.
+apply Zgt_lt.
+apply Znot_le_gt.
+intros H'.
+apply Rlt_not_le with (1 := H).
+destruct (Zle_lt_or_eq _ _ H').
+apply Rlt_le.
+now apply bpow_lt.
+rewrite H0.
+apply Rle_refl.
+Qed.
+
+Theorem bpow_le :
+ forall e1 e2 : Z,
+ (e1 <= e2)%Z -> (bpow e1 <= bpow e2)%R.
+Proof.
+intros e1 e2 H.
+apply Rnot_lt_le.
+intros H'.
+apply Zle_not_gt with (1 := H).
+apply Zlt_gt.
+now apply lt_bpow.
+Qed.
+
+Theorem le_bpow :
+ forall e1 e2 : Z,
+ (bpow e1 <= bpow e2)%R -> (e1 <= e2)%Z.
+Proof.
+intros e1 e2 H.
+apply Znot_gt_le.
+intros H'.
+apply Rle_not_lt with (1 := H).
+apply bpow_lt.
+now apply Zgt_lt.
+Qed.
+
+Theorem bpow_inj :
+ forall e1 e2 : Z,
+ bpow e1 = bpow e2 -> e1 = e2.
+Proof.
+intros.
+apply Zle_antisym.
+apply le_bpow.
+now apply Req_le.
+apply le_bpow.
+now apply Req_le.
+Qed.
+
+Theorem bpow_exp :
+ forall e : Z,
+ bpow e = exp (Z2R e * ln (Z2R r)).
+Proof.
+(* positive case *)
+assert (forall e, bpow (Zpos e) = exp (Z2R (Zpos e) * ln (Z2R r))).
+intros e.
+unfold bpow.
+rewrite Zpower_pos_nat.
+unfold Z2R at 2.
+rewrite P2R_INR.
+induction (nat_of_P e).
+rewrite Rmult_0_l.
+now rewrite exp_0.
+rewrite Zpower_nat_S.
+rewrite S_INR.
+rewrite Rmult_plus_distr_r.
+rewrite exp_plus.
+rewrite Rmult_1_l.
+rewrite exp_ln.
+rewrite <- IHn.
+rewrite <- Z2R_mult.
+now rewrite Zmult_comm.
+apply radix_pos.
+(* general case *)
+intros [|e|e].
+rewrite Rmult_0_l.
+now rewrite exp_0.
+apply H.
+unfold bpow.
+change (Z2R (Zpower_pos r e)) with (bpow (Zpos e)).
+rewrite H.
+rewrite <- exp_Ropp.
+rewrite <- Ropp_mult_distr_l_reverse.
+now rewrite <- Z2R_opp.
+Qed.
+
+(** The function [ln_beta] computes the discrete logarithm (magnitude) of a real number x in base #&beta;#, i.e. the exponent e such that #&beta;#^(e-1) <= |x| < #&beta;#^e *)
+Record ln_beta_prop x := {
+ ln_beta_val :> Z ;
+ _ : (x <> 0)%R -> (bpow (ln_beta_val - 1)%Z <= Rabs x < bpow ln_beta_val)%R
+}.
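+(* Editorial illustration, not part of the original patch: a value of type
+   [ln_beta_prop x] packages an exponent [e] with a proof that
+   [bpow (e - 1) <= Rabs x < bpow e] whenever [x <> 0]; e.g. for a radix of 10
+   and x = 314.15, the exponent is 3, since 100 <= 314.15 < 1000.
+   For x = 0 the bounds are not required to hold. *)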
+
+Definition ln_beta :
+ forall x : R, ln_beta_prop x.
+Proof.
+intros x.
+set (fact := ln (Z2R r)).
+(* . *)
+assert (0 < fact)%R.
+apply exp_lt_inv.
+rewrite exp_0.
+unfold fact.
+rewrite exp_ln.
+apply (Z2R_lt 1).
+apply radix_gt_1.
+apply radix_pos.
+(* . *)
+exists (Zfloor (ln (Rabs x) / fact) + 1)%Z.
+intros Hx'.
+generalize (Rabs_pos_lt _ Hx'). clear Hx'.
+generalize (Rabs x). clear x.
+intros x Hx.
+rewrite 2!bpow_exp.
+fold fact.
+pattern x at 2 3 ; replace x with (exp (ln x * / fact * fact)).
+split.
+rewrite Z2R_minus.
+apply exp_le.
+apply Rmult_le_compat_r.
+now apply Rlt_le.
+unfold Rminus.
+rewrite Z2R_plus.
+rewrite Rplus_assoc.
+rewrite Rplus_opp_r, Rplus_0_r.
+apply Zfloor_lb.
+apply exp_increasing.
+apply Rmult_lt_compat_r.
+exact H.
+rewrite Z2R_plus.
+apply Zfloor_ub.
+rewrite Rmult_assoc.
+rewrite Rinv_l.
+rewrite Rmult_1_r.
+now apply exp_ln.
+now apply Rgt_not_eq.
+Qed.
+
+Theorem bpow_lt_bpow :
+ forall e1 e2,
+ (bpow (e1 - 1) < bpow e2)%R ->
+ (e1 <= e2)%Z.
+Proof.
+intros e1 e2 He.
+rewrite (Zsucc_pred e1).
+apply Zlt_le_succ.
+now apply lt_bpow.
+Qed.
+
+Theorem bpow_unique :
+ forall x e1 e2,
+ (bpow (e1 - 1) <= x < bpow e1)%R ->
+ (bpow (e2 - 1) <= x < bpow e2)%R ->
+ e1 = e2.
+Proof.
+intros x e1 e2 (H1a,H1b) (H2a,H2b).
+apply Zle_antisym ;
+ apply bpow_lt_bpow ;
+ apply Rle_lt_trans with x ;
+ assumption.
+Qed.
+
+Theorem ln_beta_unique :
+ forall (x : R) (e : Z),
+ (bpow (e - 1) <= Rabs x < bpow e)%R ->
+ ln_beta x = e :> Z.
+Proof.
+intros x e1 He.
+destruct (Req_dec x 0) as [Hx|Hx].
+elim Rle_not_lt with (1 := proj1 He).
+rewrite Hx, Rabs_R0.
+apply bpow_gt_0.
+destruct (ln_beta x) as (e2, Hx2).
+simpl.
+apply bpow_unique with (2 := He).
+now apply Hx2.
+Qed.
+
+Theorem ln_beta_opp :
+ forall x,
+ ln_beta (-x) = ln_beta x :> Z.
+Proof.
+intros x.
+destruct (Req_dec x 0) as [Hx|Hx].
+now rewrite Hx, Ropp_0.
+destruct (ln_beta x) as (e, He).
+simpl.
+specialize (He Hx).
+apply ln_beta_unique.
+now rewrite Rabs_Ropp.
+Qed.
+
+Theorem ln_beta_abs :
+ forall x,
+ ln_beta (Rabs x) = ln_beta x :> Z.
+Proof.
+intros x.
+unfold Rabs.
+case Rcase_abs ; intros _.
+apply ln_beta_opp.
+apply refl_equal.
+Qed.
+
+Theorem ln_beta_unique_pos :
+ forall (x : R) (e : Z),
+ (bpow (e - 1) <= x < bpow e)%R ->
+ ln_beta x = e :> Z.
+Proof.
+intros x e1 He1.
+rewrite <- ln_beta_abs.
+apply ln_beta_unique.
+rewrite 2!Rabs_pos_eq.
+exact He1.
+apply Rle_trans with (2 := proj1 He1).
+apply bpow_ge_0.
+apply Rabs_pos.
+Qed.
+
+Theorem ln_beta_le_abs :
+ forall x y,
+ (x <> 0)%R -> (Rabs x <= Rabs y)%R ->
+ (ln_beta x <= ln_beta y)%Z.
+Proof.
+intros x y H0x Hxy.
+destruct (ln_beta x) as (ex, Hx).
+destruct (ln_beta y) as (ey, Hy).
+simpl.
+apply bpow_lt_bpow.
+specialize (Hx H0x).
+apply Rle_lt_trans with (1 := proj1 Hx).
+apply Rle_lt_trans with (1 := Hxy).
+apply Hy.
+intros Hy'.
+apply Rlt_not_le with (1 := Rabs_pos_lt _ H0x).
+apply Rle_trans with (1 := Hxy).
+rewrite Hy', Rabs_R0.
+apply Rle_refl.
+Qed.
+
+Theorem ln_beta_le :
+ forall x y,
+ (0 < x)%R -> (x <= y)%R ->
+ (ln_beta x <= ln_beta y)%Z.
+Proof.
+intros x y H0x Hxy.
+apply ln_beta_le_abs.
+now apply Rgt_not_eq.
+rewrite 2!Rabs_pos_eq.
+exact Hxy.
+apply Rle_trans with (2 := Hxy).
+now apply Rlt_le.
+now apply Rlt_le.
+Qed.
+
+Theorem ln_beta_bpow :
+ forall e, (ln_beta (bpow e) = e + 1 :> Z)%Z.
+Proof.
+intros e.
+apply ln_beta_unique.
+rewrite Rabs_right.
+replace (e + 1 - 1)%Z with e by ring.
+split.
+apply Rle_refl.
+apply bpow_lt.
+apply Zlt_succ.
+apply Rle_ge.
+apply bpow_ge_0.
+Qed.
+
+Theorem ln_beta_mult_bpow :
+ forall x e, x <> R0 ->
+ (ln_beta (x * bpow e) = ln_beta x + e :>Z)%Z.
+Proof.
+intros x e Zx.
+destruct (ln_beta x) as (ex, Ex) ; simpl.
+specialize (Ex Zx).
+apply ln_beta_unique.
+rewrite Rabs_mult.
+rewrite (Rabs_pos_eq (bpow e)) by apply bpow_ge_0.
+split.
+replace (ex + e - 1)%Z with (ex - 1 + e)%Z by ring.
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Ex.
+rewrite bpow_plus.
+apply Rmult_lt_compat_r.
+apply bpow_gt_0.
+apply Ex.
+Qed.
+
+Theorem ln_beta_le_bpow :
+ forall x e,
+ x <> R0 ->
+ (Rabs x < bpow e)%R ->
+ (ln_beta x <= e)%Z.
+Proof.
+intros x e Zx Hx.
+destruct (ln_beta x) as (ex, Ex) ; simpl.
+specialize (Ex Zx).
+apply bpow_lt_bpow.
+now apply Rle_lt_trans with (Rabs x).
+Qed.
+
+Theorem ln_beta_gt_bpow :
+ forall x e,
+ (bpow e <= Rabs x)%R ->
+ (e < ln_beta x)%Z.
+Proof.
+intros x e Hx.
+destruct (ln_beta x) as (ex, Ex) ; simpl.
+apply lt_bpow.
+apply Rle_lt_trans with (1 := Hx).
+apply Ex.
+intros Zx.
+apply Rle_not_lt with (1 := Hx).
+rewrite Zx, Rabs_R0.
+apply bpow_gt_0.
+Qed.
+
+Theorem bpow_ln_beta_gt :
+ forall x,
+ (Rabs x < bpow (ln_beta x))%R.
+Proof.
+intros x.
+destruct (Req_dec x 0) as [Zx|Zx].
+rewrite Zx, Rabs_R0.
+apply bpow_gt_0.
+destruct (ln_beta x) as (ex, Ex) ; simpl.
+now apply Ex.
+Qed.
+
+Theorem ln_beta_le_Zpower :
+ forall m e,
+ m <> Z0 ->
+ (Zabs m < Zpower r e)%Z->
+ (ln_beta (Z2R m) <= e)%Z.
+Proof.
+intros m e Zm Hm.
+apply ln_beta_le_bpow.
+exact (Z2R_neq m 0 Zm).
+destruct (Zle_or_lt 0 e).
+rewrite <- Z2R_abs, <- Z2R_Zpower with (1 := H).
+now apply Z2R_lt.
+elim Zm.
+cut (Zabs m < 0)%Z.
+now case m.
+clear -Hm H.
+now destruct e.
+Qed.
+
+Theorem ln_beta_gt_Zpower :
+ forall m e,
+ m <> Z0 ->
+ (Zpower r e <= Zabs m)%Z ->
+ (e < ln_beta (Z2R m))%Z.
+Proof.
+intros m e Zm Hm.
+apply ln_beta_gt_bpow.
+rewrite <- Z2R_abs.
+destruct (Zle_or_lt 0 e).
+rewrite <- Z2R_Zpower with (1 := H).
+now apply Z2R_le.
+apply Rle_trans with (bpow 0).
+apply bpow_le.
+now apply Zlt_le_weak.
+apply (Z2R_le 1).
+clear -Zm.
+zify ; omega.
+Qed.
+
+End pow.
+
+Section Bool.
+
+Theorem eqb_sym :
+ forall x y, Bool.eqb x y = Bool.eqb y x.
+Proof.
+now intros [|] [|].
+Qed.
+
+Theorem eqb_false :
+ forall x y, x = negb y -> Bool.eqb x y = false.
+Proof.
+now intros [|] [|].
+Qed.
+
+Theorem eqb_true :
+ forall x y, x = y -> Bool.eqb x y = true.
+Proof.
+now intros [|] [|].
+Qed.
+
+End Bool.
+
+Section cond_Ropp.
+
+Definition cond_Ropp (b : bool) m := if b then Ropp m else m.
+
+Theorem Z2R_cond_Zopp :
+ forall b m,
+ Z2R (cond_Zopp b m) = cond_Ropp b (Z2R m).
+Proof.
+intros [|] m.
+apply Z2R_opp.
+apply refl_equal.
+Qed.
+
+Theorem abs_cond_Ropp :
+ forall b m,
+ Rabs (cond_Ropp b m) = Rabs m.
+Proof.
+intros [|] m.
+apply Rabs_Ropp.
+apply refl_equal.
+Qed.
+
+Theorem cond_Ropp_Rlt_bool :
+ forall m,
+ cond_Ropp (Rlt_bool m 0) m = Rabs m.
+Proof.
+intros m.
+apply sym_eq.
+case Rlt_bool_spec ; intros Hm.
+now apply Rabs_left.
+now apply Rabs_pos_eq.
+Qed.
+
+Theorem cond_Ropp_involutive :
+ forall b x,
+ cond_Ropp b (cond_Ropp b x) = x.
+Proof.
+intros [|] x.
+apply Ropp_involutive.
+apply refl_equal.
+Qed.
+
+Theorem cond_Ropp_even_function :
+ forall {A : Type} (f : R -> A),
+ (forall x, f (Ropp x) = f x) ->
+ forall b x, f (cond_Ropp b x) = f x.
+Proof.
+now intros A f Hf [|] x ; simpl.
+Qed.
+
+Theorem cond_Ropp_odd_function :
+ forall (f : R -> R),
+ (forall x, f (Ropp x) = Ropp (f x)) ->
+ forall b x, f (cond_Ropp b x) = cond_Ropp b (f x).
+Proof.
+now intros f Hf [|] x ; simpl.
+Qed.
+
+Theorem cond_Ropp_inj :
+ forall b x y,
+ cond_Ropp b x = cond_Ropp b y -> x = y.
+Proof.
+intros b x y H.
+rewrite <- (cond_Ropp_involutive b x), H.
+apply cond_Ropp_involutive.
+Qed.
+
+Theorem cond_Ropp_mult_l :
+ forall b x y,
+ cond_Ropp b (x * y) = (cond_Ropp b x * y)%R.
+Proof.
+intros [|] x y.
+apply sym_eq.
+apply Ropp_mult_distr_l_reverse.
+apply refl_equal.
+Qed.
+
+Theorem cond_Ropp_mult_r :
+ forall b x y,
+ cond_Ropp b (x * y) = (x * cond_Ropp b y)%R.
+Proof.
+intros [|] x y.
+apply sym_eq.
+apply Ropp_mult_distr_r_reverse.
+apply refl_equal.
+Qed.
+
+Theorem cond_Ropp_plus :
+ forall b x y,
+ cond_Ropp b (x + y) = (cond_Ropp b x + cond_Ropp b y)%R.
+Proof.
+intros [|] x y.
+apply Ropp_plus_distr.
+apply refl_equal.
+Qed.
+
+End cond_Ropp.
diff --git a/flocq/Core/Fcore_Zaux.v b/flocq/Core/Fcore_Zaux.v
new file mode 100644
index 0000000..af0d837
--- /dev/null
+++ b/flocq/Core/Fcore_Zaux.v
@@ -0,0 +1,774 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+Require Import ZArith.
+Require Import ZOdiv.
+
+Section Zmissing.
+
+(** About Z *)
+Theorem Zopp_le_cancel :
+ forall x y : Z,
+ (-y <= -x)%Z -> Zle x y.
+Proof.
+intros x y Hxy.
+apply Zplus_le_reg_r with (-x - y)%Z.
+now ring_simplify.
+Qed.
+
+Theorem Zgt_not_eq :
+ forall x y : Z,
+ (y < x)%Z -> (x <> y)%Z.
+Proof.
+intros x y H Hn.
+apply Zlt_irrefl with x.
+now rewrite Hn at 1.
+Qed.
+
+End Zmissing.
+
+Section Proof_Irrelevance.
+
+Scheme eq_dep_elim := Induction for eq Sort Type.
+
+Definition eqbool_dep P (h1 : P true) b :=
+ match b return P b -> Prop with
+ | true => fun (h2 : P true) => h1 = h2
+ | false => fun (h2 : P false) => False
+ end.
+
+Lemma eqbool_irrelevance : forall (b : bool) (h1 h2 : b = true), h1 = h2.
+Proof.
+assert (forall (h : true = true), refl_equal true = h).
+apply (eq_dep_elim bool true (eqbool_dep _ _) (refl_equal _)).
+intros b.
+case b.
+intros h1 h2.
+now rewrite <- (H h1).
+intros h.
+discriminate h.
+Qed.
+
+End Proof_Irrelevance.
+
+Section Even_Odd.
+
+(** Zeven, used for rounding to nearest, ties to even *)
+Definition Zeven (n : Z) :=
+ match n with
+ | Zpos (xO _) => true
+ | Zneg (xO _) => true
+ | Z0 => true
+ | _ => false
+ end.
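+(* Editorial illustration, not part of the original patch: the test inspects
+   the binary representation of its argument, so for instance [Zeven 6 = true],
+   [Zeven (-7) = false] and [Zeven 0 = true]. *)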
+
+Theorem Zeven_mult :
+ forall x y, Zeven (x * y) = orb (Zeven x) (Zeven y).
+Proof.
+now intros [|[xp|xp|]|[xp|xp|]] [|[yp|yp|]|[yp|yp|]].
+Qed.
+
+Theorem Zeven_opp :
+ forall x, Zeven (- x) = Zeven x.
+Proof.
+now intros [|[n|n|]|[n|n|]].
+Qed.
+
+Theorem Zeven_ex :
+ forall x, exists p, x = (2 * p + if Zeven x then 0 else 1)%Z.
+Proof.
+intros [|[n|n|]|[n|n|]].
+now exists Z0.
+now exists (Zpos n).
+now exists (Zpos n).
+now exists Z0.
+exists (Zneg n - 1)%Z.
+change (2 * Zneg n - 1 = 2 * (Zneg n - 1) + 1)%Z.
+ring.
+now exists (Zneg n).
+now exists (-1)%Z.
+Qed.
+
+Theorem Zeven_2xp1 :
+ forall n, Zeven (2 * n + 1) = false.
+Proof.
+intros n.
+destruct (Zeven_ex (2 * n + 1)) as (p, Hp).
+revert Hp.
+case (Zeven (2 * n + 1)) ; try easy.
+intros H.
+apply False_ind.
+omega.
+Qed.
+
+Theorem Zeven_plus :
+ forall x y, Zeven (x + y) = Bool.eqb (Zeven x) (Zeven y).
+Proof.
+intros x y.
+destruct (Zeven_ex x) as (px, Hx).
+rewrite Hx at 1.
+destruct (Zeven_ex y) as (py, Hy).
+rewrite Hy at 1.
+replace (2 * px + (if Zeven x then 0 else 1) + (2 * py + (if Zeven y then 0 else 1)))%Z
+ with (2 * (px + py) + ((if Zeven x then 0 else 1) + (if Zeven y then 0 else 1)))%Z by ring.
+case (Zeven x) ; case (Zeven y).
+rewrite Zplus_0_r.
+now rewrite Zeven_mult.
+apply Zeven_2xp1.
+apply Zeven_2xp1.
+replace (2 * (px + py) + (1 + 1))%Z with (2 * (px + py + 1))%Z by ring.
+now rewrite Zeven_mult.
+Qed.
+
+End Even_Odd.
+
+Section Zpower.
+
+Theorem Zpower_plus :
+ forall n k1 k2, (0 <= k1)%Z -> (0 <= k2)%Z ->
+ Zpower n (k1 + k2) = (Zpower n k1 * Zpower n k2)%Z.
+Proof.
+intros n k1 k2 H1 H2.
+now apply Zpower_exp ; apply Zle_ge.
+Qed.
+
+Theorem Zpower_Zpower_nat :
+ forall b e, (0 <= e)%Z ->
+ Zpower b e = Zpower_nat b (Zabs_nat e).
+Proof.
+intros b [|e|e] He.
+apply refl_equal.
+apply Zpower_pos_nat.
+elim He.
+apply refl_equal.
+Qed.
+
+Theorem Zpower_nat_S :
+ forall b e,
+ Zpower_nat b (S e) = (b * Zpower_nat b e)%Z.
+Proof.
+intros b e.
+rewrite (Zpower_nat_is_exp 1 e).
+apply (f_equal (fun x => x * _)%Z).
+apply Zmult_1_r.
+Qed.
+
+Theorem Zpower_pos_gt_0 :
+ forall b p, (0 < b)%Z ->
+ (0 < Zpower_pos b p)%Z.
+Proof.
+intros b p Hb.
+rewrite Zpower_pos_nat.
+induction (nat_of_P p).
+easy.
+rewrite Zpower_nat_S.
+now apply Zmult_lt_0_compat.
+Qed.
+
+Theorem Zeven_Zpower :
+ forall b e, (0 < e)%Z ->
+ Zeven (Zpower b e) = Zeven b.
+Proof.
+intros b e He.
+case_eq (Zeven b) ; intros Hb.
+(* b even *)
+replace e with (e - 1 + 1)%Z by ring.
+rewrite Zpower_exp.
+rewrite Zeven_mult.
+replace (Zeven (b ^ 1)) with true.
+apply Bool.orb_true_r.
+unfold Zpower, Zpower_pos. simpl.
+now rewrite Zmult_1_r.
+omega.
+discriminate.
+(* b odd *)
+rewrite Zpower_Zpower_nat.
+induction (Zabs_nat e).
+easy.
+unfold Zpower_nat. simpl.
+rewrite Zeven_mult.
+now rewrite Hb.
+now apply Zlt_le_weak.
+Qed.
+
+Theorem Zeven_Zpower_odd :
+ forall b e, (0 <= e)%Z -> Zeven b = false ->
+ Zeven (Zpower b e) = false.
+Proof.
+intros b e He Hb.
+destruct (Z_le_lt_eq_dec _ _ He) as [He'|He'].
+rewrite <- Hb.
+now apply Zeven_Zpower.
+now rewrite <- He'.
+Qed.
+
+(** The radix must be greater than 1 *)
+Record radix := { radix_val :> Z ; radix_prop : Zle_bool 2 radix_val = true }.
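+(* Editorial note, not part of the original patch: a [radix] packages an
+   integer together with a proof that it is at least 2; for instance the usual
+   binary radix can be built as [Build_radix 2 (refl_equal true)], as done in
+   the proof of [Zdigits_correct] in Fcore_digits.v later in this patch. *)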
+
+Theorem radix_val_inj :
+ forall r1 r2, radix_val r1 = radix_val r2 -> r1 = r2.
+Proof.
+intros (r1, H1) (r2, H2) H.
+simpl in H.
+revert H1.
+rewrite H.
+intros H1.
+apply f_equal.
+apply eqbool_irrelevance.
+Qed.
+
+Variable r : radix.
+
+Theorem radix_gt_0 : (0 < r)%Z.
+Proof.
+apply Zlt_le_trans with 2%Z.
+easy.
+apply Zle_bool_imp_le.
+apply r.
+Qed.
+
+Theorem radix_gt_1 : (1 < r)%Z.
+Proof.
+destruct r as (v, Hr). simpl.
+apply Zlt_le_trans with 2%Z.
+easy.
+now apply Zle_bool_imp_le.
+Qed.
+
+Theorem Zpower_gt_1 :
+ forall p,
+ (0 < p)%Z ->
+ (1 < Zpower r p)%Z.
+Proof.
+intros [|p|p] Hp ; try easy.
+simpl.
+rewrite Zpower_pos_nat.
+generalize (lt_O_nat_of_P p).
+induction (nat_of_P p).
+easy.
+intros _.
+rewrite Zpower_nat_S.
+assert (0 < Zpower_nat r n)%Z.
+clear.
+induction n.
+easy.
+rewrite Zpower_nat_S.
+apply Zmult_lt_0_compat with (2 := IHn).
+apply radix_gt_0.
+apply Zle_lt_trans with (1 * Zpower_nat r n)%Z.
+rewrite Zmult_1_l.
+now apply (Zlt_le_succ 0).
+apply Zmult_lt_compat_r with (1 := H).
+apply radix_gt_1.
+Qed.
+
+Theorem Zpower_gt_0 :
+ forall p,
+ (0 <= p)%Z ->
+ (0 < Zpower r p)%Z.
+Proof.
+intros p Hp.
+rewrite Zpower_Zpower_nat with (1 := Hp).
+induction (Zabs_nat p).
+easy.
+rewrite Zpower_nat_S.
+apply Zmult_lt_0_compat with (2 := IHn).
+apply radix_gt_0.
+Qed.
+
+Theorem Zpower_ge_0 :
+ forall e,
+ (0 <= Zpower r e)%Z.
+Proof.
+intros [|e|e] ; try easy.
+apply Zlt_le_weak.
+now apply Zpower_gt_0.
+Qed.
+
+Theorem Zpower_le :
+ forall e1 e2, (e1 <= e2)%Z ->
+ (Zpower r e1 <= Zpower r e2)%Z.
+Proof.
+intros e1 e2 He.
+destruct (Zle_or_lt 0 e1)%Z as [H1|H1].
+replace e2 with (e2 - e1 + e1)%Z by ring.
+rewrite Zpower_plus with (2 := H1).
+rewrite <- (Zmult_1_l (r ^ e1)) at 1.
+apply Zmult_le_compat_r.
+apply (Zlt_le_succ 0).
+apply Zpower_gt_0.
+now apply Zle_minus_le_0.
+apply Zpower_ge_0.
+now apply Zle_minus_le_0.
+clear He.
+destruct e1 as [|e1|e1] ; try easy.
+apply Zpower_ge_0.
+Qed.
+
+Theorem Zpower_lt :
+ forall e1 e2, (0 <= e2)%Z -> (e1 < e2)%Z ->
+ (Zpower r e1 < Zpower r e2)%Z.
+Proof.
+intros e1 e2 He2 He.
+destruct (Zle_or_lt 0 e1)%Z as [H1|H1].
+replace e2 with (e2 - e1 + e1)%Z by ring.
+rewrite Zpower_plus with (2 := H1).
+rewrite Zmult_comm.
+rewrite <- (Zmult_1_r (r ^ e1)) at 1.
+apply Zmult_lt_compat2.
+split.
+now apply Zpower_gt_0.
+apply Zle_refl.
+split.
+easy.
+apply Zpower_gt_1.
+clear -He ; omega.
+apply Zle_minus_le_0.
+now apply Zlt_le_weak.
+revert H1.
+clear -He2.
+destruct e1 ; try easy.
+intros _.
+now apply Zpower_gt_0.
+Qed.
+
+Theorem Zpower_lt_Zpower :
+ forall e1 e2,
+ (Zpower r (e1 - 1) < Zpower r e2)%Z ->
+ (e1 <= e2)%Z.
+Proof.
+intros e1 e2 He.
+apply Znot_gt_le.
+intros H.
+apply Zlt_not_le with (1 := He).
+apply Zpower_le.
+clear -H ; omega.
+Qed.
+
+End Zpower.
+
+Section Div_Mod.
+
+Theorem Zmod_mod_mult :
+ forall n a b, (0 < a)%Z -> (0 <= b)%Z ->
+ Zmod (Zmod n (a * b)) b = Zmod n b.
+Proof.
+intros n a [|b|b] Ha Hb.
+now rewrite 2!Zmod_0_r.
+rewrite (Zmod_eq n (a * Zpos b)).
+rewrite Zmult_assoc.
+unfold Zminus.
+rewrite Zopp_mult_distr_l.
+apply Z_mod_plus.
+easy.
+apply Zmult_gt_0_compat.
+now apply Zlt_gt.
+easy.
+now elim Hb.
+Qed.
+
+Theorem ZOmod_eq :
+ forall a b,
+ ZOmod a b = (a - ZOdiv a b * b)%Z.
+Proof.
+intros a b.
+rewrite (ZO_div_mod_eq a b) at 2.
+ring.
+Qed.
+
+Theorem ZOmod_mod_mult :
+ forall n a b,
+ ZOmod (ZOmod n (a * b)) b = ZOmod n b.
+Proof.
+intros n a b.
+assert (ZOmod n (a * b) = n + - (ZOdiv n (a * b) * a) * b)%Z.
+rewrite <- Zopp_mult_distr_l.
+rewrite <- Zmult_assoc.
+apply ZOmod_eq.
+rewrite H.
+apply ZO_mod_plus.
+rewrite <- H.
+apply ZOmod_sgn2.
+Qed.
+
+Theorem Zdiv_mod_mult :
+ forall n a b, (0 <= a)%Z -> (0 <= b)%Z ->
+ (Zdiv (Zmod n (a * b)) a) = Zmod (Zdiv n a) b.
+Proof.
+intros n a b Ha Hb.
+destruct (Zle_lt_or_eq _ _ Ha) as [Ha'|Ha'].
+destruct (Zle_lt_or_eq _ _ Hb) as [Hb'|Hb'].
+rewrite (Zmod_eq n (a * b)).
+rewrite (Zmult_comm a b) at 2.
+rewrite Zmult_assoc.
+unfold Zminus.
+rewrite Zopp_mult_distr_l.
+rewrite Z_div_plus by now apply Zlt_gt.
+rewrite <- Zdiv_Zdiv by easy.
+apply sym_eq.
+apply Zmod_eq.
+now apply Zlt_gt.
+now apply Zmult_gt_0_compat ; apply Zlt_gt.
+rewrite <- Hb'.
+rewrite Zmult_0_r, 2!Zmod_0_r.
+apply Zdiv_0_l.
+rewrite <- Ha'.
+now rewrite 2!Zdiv_0_r, Zmod_0_l.
+Qed.
+
+Theorem ZOdiv_mod_mult :
+ forall n a b,
+ (ZOdiv (ZOmod n (a * b)) a) = ZOmod (ZOdiv n a) b.
+Proof.
+intros n a b.
+destruct (Z_eq_dec a 0) as [Za|Za].
+rewrite Za.
+now rewrite 2!ZOdiv_0_r, ZOmod_0_l.
+assert (ZOmod n (a * b) = n + - (ZOdiv (ZOdiv n a) b * b) * a)%Z.
+rewrite (ZOmod_eq n (a * b)) at 1.
+rewrite ZOdiv_ZOdiv.
+ring.
+rewrite H.
+rewrite ZO_div_plus with (2 := Za).
+apply sym_eq.
+apply ZOmod_eq.
+rewrite <- H.
+apply ZOmod_sgn2.
+Qed.
+
+Theorem ZOdiv_small_abs :
+ forall a b,
+ (Zabs a < b)%Z -> ZOdiv a b = Z0.
+Proof.
+intros a b Ha.
+destruct (Zle_or_lt 0 a) as [H|H].
+apply ZOdiv_small.
+split.
+exact H.
+now rewrite Zabs_eq in Ha.
+apply Zopp_inj.
+rewrite <- ZOdiv_opp_l, Zopp_0.
+apply ZOdiv_small.
+generalize (Zabs_non_eq a).
+omega.
+Qed.
+
+Theorem ZOmod_small_abs :
+ forall a b,
+ (Zabs a < b)%Z -> ZOmod a b = a.
+Proof.
+intros a b Ha.
+destruct (Zle_or_lt 0 a) as [H|H].
+apply ZOmod_small.
+split.
+exact H.
+now rewrite Zabs_eq in Ha.
+apply Zopp_inj.
+rewrite <- ZOmod_opp_l.
+apply ZOmod_small.
+generalize (Zabs_non_eq a).
+omega.
+Qed.
+
+Theorem ZOdiv_plus :
+ forall a b c, (0 <= a * b)%Z ->
+ (ZOdiv (a + b) c = ZOdiv a c + ZOdiv b c + ZOdiv (ZOmod a c + ZOmod b c) c)%Z.
+Proof.
+intros a b c Hab.
+destruct (Z_eq_dec c 0) as [Zc|Zc].
+now rewrite Zc, 4!ZOdiv_0_r.
+apply Zmult_reg_r with (1 := Zc).
+rewrite 2!Zmult_plus_distr_l.
+assert (forall d, ZOdiv d c * c = d - ZOmod d c)%Z.
+intros d.
+rewrite ZOmod_eq.
+ring.
+rewrite 4!H.
+rewrite <- ZOplus_mod with (1 := Hab).
+ring.
+Qed.
+
+End Div_Mod.
+
+Section Same_sign.
+
+Theorem Zsame_sign_trans :
+ forall v u w, v <> Z0 ->
+ (0 <= u * v)%Z -> (0 <= v * w)%Z -> (0 <= u * w)%Z.
+Proof.
+intros [|v|v] [|u|u] [|w|w] Zv Huv Hvw ; try easy ; now elim Zv.
+Qed.
+
+Theorem Zsame_sign_trans_weak :
+ forall v u w, (v = Z0 -> w = Z0) ->
+ (0 <= u * v)%Z -> (0 <= v * w)%Z -> (0 <= u * w)%Z.
+Proof.
+intros [|v|v] [|u|u] [|w|w] Zv Huv Hvw ; try easy ; now discriminate Zv.
+Qed.
+
+Theorem Zsame_sign_imp :
+ forall u v,
+ (0 < u -> 0 <= v)%Z ->
+ (0 < -u -> 0 <= -v)%Z ->
+ (0 <= u * v)%Z.
+Proof.
+intros [|u|u] v Hp Hn.
+easy.
+apply Zmult_le_0_compat.
+easy.
+now apply Hp.
+replace (Zneg u * v)%Z with (Zpos u * (-v))%Z.
+apply Zmult_le_0_compat.
+easy.
+now apply Hn.
+rewrite <- Zopp_mult_distr_r.
+apply Zopp_mult_distr_l.
+Qed.
+
+Theorem Zsame_sign_odiv :
+ forall u v, (0 <= v)%Z ->
+ (0 <= u * ZOdiv u v)%Z.
+Proof.
+intros u v Hv.
+apply Zsame_sign_imp ; intros Hu.
+apply ZO_div_pos with (2 := Hv).
+now apply Zlt_le_weak.
+rewrite <- ZOdiv_opp_l.
+apply ZO_div_pos with (2 := Hv).
+now apply Zlt_le_weak.
+Qed.
+
+End Same_sign.
+
+(** Boolean comparisons *)
+
+Section Zeq_bool.
+
+Inductive Zeq_bool_prop (x y : Z) : bool -> Prop :=
+ | Zeq_bool_true_ : x = y -> Zeq_bool_prop x y true
+ | Zeq_bool_false_ : x <> y -> Zeq_bool_prop x y false.
+
+Theorem Zeq_bool_spec :
+ forall x y, Zeq_bool_prop x y (Zeq_bool x y).
+Proof.
+intros x y.
+generalize (Zeq_is_eq_bool x y).
+case (Zeq_bool x y) ; intros (H1, H2) ; constructor.
+now apply H2.
+intros H.
+specialize (H1 H).
+discriminate H1.
+Qed.
+
+Theorem Zeq_bool_true :
+ forall x y, x = y -> Zeq_bool x y = true.
+Proof.
+intros x y.
+apply -> Zeq_is_eq_bool.
+Qed.
+
+Theorem Zeq_bool_false :
+ forall x y, x <> y -> Zeq_bool x y = false.
+Proof.
+intros x y.
+generalize (proj2 (Zeq_is_eq_bool x y)).
+case Zeq_bool.
+intros He Hn.
+elim Hn.
+now apply He.
+now intros _ _.
+Qed.
+
+End Zeq_bool.
+
+Section Zle_bool.
+
+Inductive Zle_bool_prop (x y : Z) : bool -> Prop :=
+ | Zle_bool_true_ : (x <= y)%Z -> Zle_bool_prop x y true
+ | Zle_bool_false_ : (y < x)%Z -> Zle_bool_prop x y false.
+
+Theorem Zle_bool_spec :
+ forall x y, Zle_bool_prop x y (Zle_bool x y).
+Proof.
+intros x y.
+generalize (Zle_is_le_bool x y).
+case Zle_bool ; intros (H1, H2) ; constructor.
+now apply H2.
+destruct (Zle_or_lt x y) as [H|H].
+now specialize (H1 H).
+exact H.
+Qed.
+
+Theorem Zle_bool_true :
+ forall x y : Z,
+ (x <= y)%Z -> Zle_bool x y = true.
+Proof.
+intros x y.
+apply (proj1 (Zle_is_le_bool x y)).
+Qed.
+
+Theorem Zle_bool_false :
+ forall x y : Z,
+ (y < x)%Z -> Zle_bool x y = false.
+Proof.
+intros x y Hxy.
+generalize (Zle_cases x y).
+case Zle_bool ; intros H.
+elim (Zlt_irrefl x).
+now apply Zle_lt_trans with y.
+apply refl_equal.
+Qed.
+
+End Zle_bool.
+
+Section Zlt_bool.
+
+Inductive Zlt_bool_prop (x y : Z) : bool -> Prop :=
+ | Zlt_bool_true_ : (x < y)%Z -> Zlt_bool_prop x y true
+ | Zlt_bool_false_ : (y <= x)%Z -> Zlt_bool_prop x y false.
+
+Theorem Zlt_bool_spec :
+ forall x y, Zlt_bool_prop x y (Zlt_bool x y).
+Proof.
+intros x y.
+generalize (Zlt_is_lt_bool x y).
+case Zlt_bool ; intros (H1, H2) ; constructor.
+now apply H2.
+destruct (Zle_or_lt y x) as [H|H].
+exact H.
+now specialize (H1 H).
+Qed.
+
+Theorem Zlt_bool_true :
+ forall x y : Z,
+ (x < y)%Z -> Zlt_bool x y = true.
+Proof.
+intros x y.
+apply (proj1 (Zlt_is_lt_bool x y)).
+Qed.
+
+Theorem Zlt_bool_false :
+ forall x y : Z,
+ (y <= x)%Z -> Zlt_bool x y = false.
+Proof.
+intros x y Hxy.
+generalize (Zlt_cases x y).
+case Zlt_bool ; intros H.
+elim (Zlt_irrefl x).
+now apply Zlt_le_trans with y.
+apply refl_equal.
+Qed.
+
+Theorem negb_Zle_bool :
+ forall x y : Z,
+ negb (Zle_bool x y) = Zlt_bool y x.
+Proof.
+intros x y.
+case Zle_bool_spec ; intros H.
+now rewrite Zlt_bool_false.
+now rewrite Zlt_bool_true.
+Qed.
+
+Theorem negb_Zlt_bool :
+ forall x y : Z,
+ negb (Zlt_bool x y) = Zle_bool y x.
+Proof.
+intros x y.
+case Zlt_bool_spec ; intros H.
+now rewrite Zle_bool_false.
+now rewrite Zle_bool_true.
+Qed.
+
+End Zlt_bool.
+
+Section Zcompare.
+
+Inductive Zcompare_prop (x y : Z) : comparison -> Prop :=
+ | Zcompare_Lt_ : (x < y)%Z -> Zcompare_prop x y Lt
+ | Zcompare_Eq_ : x = y -> Zcompare_prop x y Eq
+ | Zcompare_Gt_ : (y < x)%Z -> Zcompare_prop x y Gt.
+
+Theorem Zcompare_spec :
+ forall x y, Zcompare_prop x y (Zcompare x y).
+Proof.
+intros x y.
+destruct (Z_dec x y) as [[H|H]|H].
+generalize (Zlt_compare _ _ H).
+case (Zcompare x y) ; try easy.
+now constructor.
+generalize (Zgt_compare _ _ H).
+case (Zcompare x y) ; try easy.
+constructor.
+now apply Zgt_lt.
+generalize (proj2 (Zcompare_Eq_iff_eq _ _) H).
+case (Zcompare x y) ; try easy.
+now constructor.
+Qed.
+
+Theorem Zcompare_Lt :
+ forall x y,
+ (x < y)%Z -> Zcompare x y = Lt.
+Proof.
+easy.
+Qed.
+
+Theorem Zcompare_Eq :
+ forall x y,
+ (x = y)%Z -> Zcompare x y = Eq.
+Proof.
+intros x y.
+apply <- Zcompare_Eq_iff_eq.
+Qed.
+
+Theorem Zcompare_Gt :
+ forall x y,
+ (y < x)%Z -> Zcompare x y = Gt.
+Proof.
+intros x y.
+apply Zlt_gt.
+Qed.
+
+End Zcompare.
+
+Section cond_Zopp.
+
+Definition cond_Zopp (b : bool) m := if b then Zopp m else m.
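+(* Editorial illustration, not part of the original patch: [cond_Zopp b m]
+   negates [m] exactly when [b] is [true], e.g. [cond_Zopp true 5 = (-5)%Z] and
+   [cond_Zopp false 5 = 5%Z]; it combines a sign bit with a magnitude. *)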
+
+Theorem abs_cond_Zopp :
+ forall b m,
+ Zabs (cond_Zopp b m) = Zabs m.
+Proof.
+intros [|] m.
+apply Zabs_Zopp.
+apply refl_equal.
+Qed.
+
+Theorem cond_Zopp_Zlt_bool :
+ forall m,
+ cond_Zopp (Zlt_bool m 0) m = Zabs m.
+Proof.
+intros m.
+apply sym_eq.
+case Zlt_bool_spec ; intros Hm.
+apply Zabs_non_eq.
+now apply Zlt_le_weak.
+now apply Zabs_eq.
+Qed.
+
+End cond_Zopp.
diff --git a/flocq/Core/Fcore_defs.v b/flocq/Core/Fcore_defs.v
new file mode 100644
index 0000000..fda3a85
--- /dev/null
+++ b/flocq/Core/Fcore_defs.v
@@ -0,0 +1,101 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Basic definitions: float and rounding property *)
+Require Import Fcore_Raux.
+
+Section Def.
+
+(** Definition of a floating-point number *)
+Record float (beta : radix) := Float { Fnum : Z ; Fexp : Z }.
+
+Implicit Arguments Fnum [[beta]].
+Implicit Arguments Fexp [[beta]].
+
+Variable beta : radix.
+
+Definition F2R (f : float beta) :=
+ (Z2R (Fnum f) * bpow beta (Fexp f))%R.
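+(* Editorial illustration, not part of the original patch: [F2R] interprets a
+   float as the real number Fnum * beta ^ Fexp; e.g. for a radix of 10, the
+   float with mantissa 314 and exponent (-2) denotes 3.14. *)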
+
+(** Requirements on a rounding mode *)
+Definition round_pred_total (P : R -> R -> Prop) :=
+ forall x, exists f, P x f.
+
+Definition round_pred_monotone (P : R -> R -> Prop) :=
+ forall x y f g, P x f -> P y g -> (x <= y)%R -> (f <= g)%R.
+
+Definition round_pred (P : R -> R -> Prop) :=
+ round_pred_total P /\
+ round_pred_monotone P.
+
+End Def.
+
+Implicit Arguments Fnum [[beta]].
+Implicit Arguments Fexp [[beta]].
+Implicit Arguments F2R [[beta]].
+
+Section RND.
+
+(** property of being a round toward -inf *)
+Definition Rnd_DN_pt (F : R -> Prop) (x f : R) :=
+ F f /\ (f <= x)%R /\
+ forall g : R, F g -> (g <= x)%R -> (g <= f)%R.
+
+Definition Rnd_DN (F : R -> Prop) (rnd : R -> R) :=
+ forall x : R, Rnd_DN_pt F x (rnd x).
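+(* Editorial note, not part of the original patch: informally, if [F] holds
+   exactly of the reals of the form [Z2R n], then [fun x => Z2R (Zfloor x)]
+   satisfies [Rnd_DN F]: the floor of [x] is an integer, is at most [x], and
+   dominates every integer below [x]. *)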
+
+(** property of being a round toward +inf *)
+Definition Rnd_UP_pt (F : R -> Prop) (x f : R) :=
+ F f /\ (x <= f)%R /\
+ forall g : R, F g -> (x <= g)%R -> (f <= g)%R.
+
+Definition Rnd_UP (F : R -> Prop) (rnd : R -> R) :=
+ forall x : R, Rnd_UP_pt F x (rnd x).
+
+(** property of being a round toward zero *)
+Definition Rnd_ZR_pt (F : R -> Prop) (x f : R) :=
+ ( (0 <= x)%R -> Rnd_DN_pt F x f ) /\
+ ( (x <= 0)%R -> Rnd_UP_pt F x f ).
+
+Definition Rnd_ZR (F : R -> Prop) (rnd : R -> R) :=
+ forall x : R, Rnd_ZR_pt F x (rnd x).
+
+(** property of being a round to nearest *)
+Definition Rnd_N_pt (F : R -> Prop) (x f : R) :=
+ F f /\
+ forall g : R, F g -> (Rabs (f - x) <= Rabs (g - x))%R.
+
+Definition Rnd_N (F : R -> Prop) (rnd : R -> R) :=
+ forall x : R, Rnd_N_pt F x (rnd x).
+
+Definition Rnd_NG_pt (F : R -> Prop) (P : R -> R -> Prop) (x f : R) :=
+ Rnd_N_pt F x f /\
+ ( P x f \/ forall f2 : R, Rnd_N_pt F x f2 -> f2 = f ).
+
+Definition Rnd_NG (F : R -> Prop) (P : R -> R -> Prop) (rnd : R -> R) :=
+ forall x : R, Rnd_NG_pt F P x (rnd x).
+
+Definition Rnd_NA_pt (F : R -> Prop) (x f : R) :=
+ Rnd_N_pt F x f /\
+ forall f2 : R, Rnd_N_pt F x f2 -> (Rabs f2 <= Rabs f)%R.
+
+Definition Rnd_NA (F : R -> Prop) (rnd : R -> R) :=
+ forall x : R, Rnd_NA_pt F x (rnd x).
+
+End RND.
diff --git a/flocq/Core/Fcore_digits.v b/flocq/Core/Fcore_digits.v
new file mode 100644
index 0000000..2ae076e
--- /dev/null
+++ b/flocq/Core/Fcore_digits.v
@@ -0,0 +1,899 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+Require Import ZArith.
+Require Import Fcore_Zaux.
+Require Import ZOdiv.
+
+(** Computes the position of the leading bit of a positive integer (radix 2),
+i.e. one less than its number of bits.
+
+It serves as an upper bound on the number of digits to ensure termination.
+*)
+
+Fixpoint digits2_Pnat (n : positive) : nat :=
+ match n with
+ | xH => O
+ | xO p => S (digits2_Pnat p)
+ | xI p => S (digits2_Pnat p)
+ end.
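+(* Editorial illustration, not part of the original patch: [digits2_Pnat 5 = 2]
+   (since 5 is [xI (xO xH)]), and indeed [2^2 <= 5 < 2^3], as stated by
+   [digits2_Pnat_correct] below. *)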
+
+Theorem digits2_Pnat_correct :
+ forall n,
+ let d := digits2_Pnat n in
+ (Zpower_nat 2 d <= Zpos n < Zpower_nat 2 (S d))%Z.
+Proof.
+intros n d. unfold d. clear.
+assert (Hp: forall m, (Zpower_nat 2 (S m) = 2 * Zpower_nat 2 m)%Z) by easy.
+induction n ; simpl.
+rewrite Zpos_xI, 2!Hp.
+omega.
+rewrite (Zpos_xO n), 2!Hp.
+omega.
+now split.
+Qed.
+
+Section Fcore_digits.
+
+Variable beta : radix.
+
+Definition Zdigit n k := ZOmod (ZOdiv n (Zpower beta k)) beta.
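+(* Editorial illustration, not part of the original patch: [Zdigit n k] is the
+   k-th digit of [n] in radix [beta], obtained by shifting [n] right by [k]
+   digits and keeping the remainder modulo [beta]; with a radix of 10,
+   [Zdigit 2345 0 = 5] and [Zdigit 2345 2 = 3]. Since [ZOdiv] and [ZOmod]
+   truncate toward zero, the digits of a negative number are non-positive,
+   e.g. [Zdigit (-2345) 0 = (-5)%Z]. *)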
+
+Theorem Zdigit_lt :
+ forall n k,
+ (k < 0)%Z ->
+ Zdigit n k = Z0.
+Proof.
+intros n [|k|k] Hk ; try easy.
+now case n.
+Qed.
+
+Theorem Zdigit_0 :
+ forall k, Zdigit 0 k = Z0.
+Proof.
+intros k.
+unfold Zdigit.
+rewrite ZOdiv_0_l.
+apply ZOmod_0_l.
+Qed.
+
+Theorem Zdigit_opp :
+ forall n k,
+ Zdigit (-n) k = Zopp (Zdigit n k).
+Proof.
+intros n k.
+unfold Zdigit.
+rewrite ZOdiv_opp_l.
+apply ZOmod_opp_l.
+Qed.
+
+Theorem Zdigit_ge_Zpower_pos :
+ forall e n,
+ (0 <= n < Zpower beta e)%Z ->
+ forall k, (e <= k)%Z -> Zdigit n k = Z0.
+Proof.
+intros e n Hn k Hk.
+unfold Zdigit.
+rewrite ZOdiv_small.
+apply ZOmod_0_l.
+split.
+apply Hn.
+apply Zlt_le_trans with (1 := proj2 Hn).
+replace k with (e + (k - e))%Z by ring.
+rewrite Zpower_plus.
+rewrite <- (Zmult_1_r (beta ^ e)) at 1.
+apply Zmult_le_compat_l.
+apply (Zlt_le_succ 0).
+apply Zpower_gt_0.
+now apply Zle_minus_le_0.
+apply Zlt_le_weak.
+now apply Zle_lt_trans with n.
+generalize (Zle_lt_trans _ _ _ (proj1 Hn) (proj2 Hn)).
+clear.
+now destruct e as [|e|e].
+now apply Zle_minus_le_0.
+Qed.
+
+Theorem Zdigit_ge_Zpower :
+ forall e n,
+ (Zabs n < Zpower beta e)%Z ->
+ forall k, (e <= k)%Z -> Zdigit n k = Z0.
+Proof.
+intros e [|n|n] Hn k.
+easy.
+apply Zdigit_ge_Zpower_pos.
+now split.
+intros He.
+change (Zneg n) with (Zopp (Zpos n)).
+rewrite Zdigit_opp.
+rewrite Zdigit_ge_Zpower_pos with (2 := He).
+apply Zopp_0.
+now split.
+Qed.
+
+Theorem Zdigit_not_0_pos :
+ forall e n, (0 <= e)%Z ->
+ (Zpower beta e <= n < Zpower beta (e + 1))%Z ->
+ Zdigit n e <> Z0.
+Proof.
+intros e n He (Hn1,Hn2).
+unfold Zdigit.
+rewrite <- ZOdiv_mod_mult.
+rewrite ZOmod_small.
+intros H.
+apply Zle_not_lt with (1 := Hn1).
+rewrite (ZO_div_mod_eq n (beta ^ e)).
+rewrite H, Zmult_0_r, Zplus_0_l.
+apply ZOmod_lt_pos_pos.
+apply Zle_trans with (2 := Hn1).
+apply Zpower_ge_0.
+now apply Zpower_gt_0.
+split.
+apply Zle_trans with (2 := Hn1).
+apply Zpower_ge_0.
+replace (beta ^ e * beta)%Z with (beta ^ (e + 1))%Z.
+exact Hn2.
+rewrite <- (Zmult_1_r beta) at 3.
+now apply (Zpower_plus beta e 1).
+Qed.
+
+Theorem Zdigit_not_0 :
+ forall e n, (0 <= e)%Z ->
+ (Zpower beta e <= Zabs n < Zpower beta (e + 1))%Z ->
+ Zdigit n e <> Z0.
+Proof.
+intros e n He Hn.
+destruct (Zle_or_lt 0 n) as [Hn'|Hn'].
+rewrite (Zabs_eq _ Hn') in Hn.
+now apply Zdigit_not_0_pos.
+intros H.
+rewrite (Zabs_non_eq n) in Hn by now apply Zlt_le_weak.
+apply (Zdigit_not_0_pos _ _ He Hn).
+now rewrite Zdigit_opp, H.
+Qed.
+
+Theorem Zdigit_mul_pow :
+ forall n k k', (0 <= k')%Z ->
+ Zdigit (n * Zpower beta k') k = Zdigit n (k - k').
+Proof.
+intros n k k' Hk'.
+destruct (Zle_or_lt k' k) as [H|H].
+revert k H.
+pattern k' ; apply Zlt_0_ind with (2 := Hk').
+clear k' Hk'.
+intros k' IHk' Hk' k H.
+unfold Zdigit.
+apply (f_equal (fun x => ZOmod x beta)).
+pattern k at 1 ; replace k with (k - k' + k')%Z by ring.
+rewrite Zpower_plus with (2 := Hk').
+apply ZOdiv_mult_cancel_r.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+now apply Zle_minus_le_0.
+destruct (Zle_or_lt 0 k) as [H0|H0].
+rewrite (Zdigit_lt n) by omega.
+unfold Zdigit.
+replace k' with (k' - k + k)%Z by ring.
+rewrite Zpower_plus with (2 := H0).
+rewrite Zmult_assoc, ZO_div_mult.
+replace (k' - k)%Z with (k' - k - 1 + 1)%Z by ring.
+rewrite Zpower_exp by omega.
+rewrite Zmult_assoc.
+change (Zpower beta 1) with (beta * 1)%Z.
+rewrite Zmult_1_r.
+apply ZO_mod_mult.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+apply Zle_minus_le_0.
+now apply Zlt_le_weak.
+rewrite Zdigit_lt with (1 := H0).
+apply sym_eq.
+apply Zdigit_lt.
+omega.
+Qed.
+
+Theorem Zdigit_div_pow :
+ forall n k k', (0 <= k)%Z -> (0 <= k')%Z ->
+ Zdigit (ZOdiv n (Zpower beta k')) k = Zdigit n (k + k').
+Proof.
+intros n k k' Hk Hk'.
+unfold Zdigit.
+rewrite ZOdiv_ZOdiv.
+rewrite Zplus_comm.
+now rewrite Zpower_plus.
+Qed.
+
+Theorem Zdigit_mod_pow :
+ forall n k k', (k < k')%Z ->
+ Zdigit (ZOmod n (Zpower beta k')) k = Zdigit n k.
+Proof.
+intros n k k' Hk.
+destruct (Zle_or_lt 0 k) as [H|H].
+unfold Zdigit.
+rewrite <- 2!ZOdiv_mod_mult.
+apply (f_equal (fun x => ZOdiv x (beta ^ k))).
+replace k' with (k + 1 + (k' - (k + 1)))%Z by ring.
+rewrite Zpower_exp by omega.
+rewrite Zmult_comm.
+rewrite Zpower_plus by easy.
+change (Zpower beta 1) with (beta * 1)%Z.
+rewrite Zmult_1_r.
+apply ZOmod_mod_mult.
+now rewrite 2!Zdigit_lt.
+Qed.
+
+Theorem Zdigit_mod_pow_out :
+ forall n k k', (0 <= k' <= k)%Z ->
+ Zdigit (ZOmod n (Zpower beta k')) k = Z0.
+Proof.
+intros n k k' Hk.
+unfold Zdigit.
+rewrite ZOdiv_small_abs.
+apply ZOmod_0_l.
+apply Zlt_le_trans with (Zpower beta k').
+rewrite <- (Zabs_eq (beta ^ k')) at 2 by apply Zpower_ge_0.
+apply ZOmod_lt.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+now apply Zpower_le.
+Qed.
+
+Fixpoint Zsum_digit f k :=
+ match k with
+ | O => Z0
+ | S k => (Zsum_digit f k + f (Z_of_nat k) * Zpower beta (Z_of_nat k))%Z
+ end.
+
+Theorem Zsum_digit_digit :
+ forall n k,
+ Zsum_digit (Zdigit n) k = ZOmod n (Zpower beta (Z_of_nat k)).
+Proof.
+intros n.
+induction k.
+apply sym_eq.
+apply ZOmod_1_r.
+simpl Zsum_digit.
+rewrite IHk.
+unfold Zdigit.
+rewrite <- ZOdiv_mod_mult.
+rewrite <- (ZOmod_mod_mult n beta).
+rewrite Zmult_comm.
+replace (beta ^ Z_of_nat k * beta)%Z with (Zpower beta (Z_of_nat (S k))).
+rewrite Zplus_comm, Zmult_comm.
+apply sym_eq.
+apply ZO_div_mod_eq.
+rewrite inj_S.
+rewrite <- (Zmult_1_r beta) at 3.
+apply Zpower_plus.
+apply Zle_0_nat.
+easy.
+Qed.
+
+Theorem Zpower_gt_id :
+ forall n, (n < Zpower beta n)%Z.
+Proof.
+intros [|n|n] ; try easy.
+simpl.
+rewrite Zpower_pos_nat.
+rewrite Zpos_eq_Z_of_nat_o_nat_of_P.
+induction (nat_of_P n).
+easy.
+rewrite inj_S.
+change (Zpower_nat beta (S n0)) with (beta * Zpower_nat beta n0)%Z.
+unfold Zsucc.
+apply Zlt_le_trans with (beta * (Z_of_nat n0 + 1))%Z.
+clear.
+apply Zlt_0_minus_lt.
+replace (beta * (Z_of_nat n0 + 1) - (Z_of_nat n0 + 1))%Z with ((beta - 1) * (Z_of_nat n0 + 1))%Z by ring.
+apply Zmult_lt_0_compat.
+cut (2 <= beta)%Z. omega.
+apply Zle_bool_imp_le.
+apply beta.
+apply (Zle_lt_succ 0).
+apply Zle_0_nat.
+apply Zmult_le_compat_l.
+now apply Zlt_le_succ.
+apply Zle_trans with 2%Z.
+easy.
+apply Zle_bool_imp_le.
+apply beta.
+Qed.
+
+Theorem Zdigit_ext :
+ forall n1 n2,
+ (forall k, (0 <= k)%Z -> Zdigit n1 k = Zdigit n2 k) ->
+ n1 = n2.
+Proof.
+intros n1 n2 H.
+rewrite <- (ZOmod_small_abs n1 (Zpower beta (Zmax (Zabs n1) (Zabs n2)))).
+rewrite <- (ZOmod_small_abs n2 (Zpower beta (Zmax (Zabs n1) (Zabs n2)))) at 2.
+replace (Zmax (Zabs n1) (Zabs n2)) with (Z_of_nat (Zabs_nat (Zmax (Zabs n1) (Zabs n2)))).
+rewrite <- 2!Zsum_digit_digit.
+induction (Zabs_nat (Zmax (Zabs n1) (Zabs n2))).
+easy.
+simpl.
+rewrite H, IHn.
+apply refl_equal.
+apply Zle_0_nat.
+rewrite inj_Zabs_nat.
+apply Zabs_eq.
+apply Zle_trans with (Zabs n1).
+apply Zabs_pos.
+apply Zle_max_l.
+apply Zlt_le_trans with (Zpower beta (Zabs n2)).
+apply Zpower_gt_id.
+apply Zpower_le.
+apply Zle_max_r.
+apply Zlt_le_trans with (Zpower beta (Zabs n1)).
+apply Zpower_gt_id.
+apply Zpower_le.
+apply Zle_max_l.
+Qed.
+
+Theorem ZOmod_plus_pow_digit :
+ forall u v n, (0 <= u * v)%Z ->
+ (forall k, (0 <= k < n)%Z -> Zdigit u k = Z0 \/ Zdigit v k = Z0) ->
+ ZOmod (u + v) (Zpower beta n) = (ZOmod u (Zpower beta n) + ZOmod v (Zpower beta n))%Z.
+Proof.
+intros u v n Huv Hd.
+destruct (Zle_or_lt 0 n) as [Hn|Hn].
+rewrite ZOplus_mod with (1 := Huv).
+apply ZOmod_small_abs.
+generalize (Zle_refl n).
+pattern n at -2 ; rewrite <- Zabs_eq with (1 := Hn).
+rewrite <- (inj_Zabs_nat n).
+induction (Zabs_nat n) as [|p IHp].
+now rewrite 2!ZOmod_1_r.
+rewrite <- 2!Zsum_digit_digit.
+simpl Zsum_digit.
+rewrite inj_S.
+intros Hn'.
+replace (Zsum_digit (Zdigit u) p + Zdigit u (Z_of_nat p) * beta ^ Z_of_nat p +
+ (Zsum_digit (Zdigit v) p + Zdigit v (Z_of_nat p) * beta ^ Z_of_nat p))%Z with
+ (Zsum_digit (Zdigit u) p + Zsum_digit (Zdigit v) p +
+ (Zdigit u (Z_of_nat p) + Zdigit v (Z_of_nat p)) * beta ^ Z_of_nat p)%Z by ring.
+apply (Zle_lt_trans _ _ _ (Zabs_triangle _ _)).
+replace (beta ^ Zsucc (Z_of_nat p))%Z with (beta ^ Z_of_nat p + (beta - 1) * beta ^ Z_of_nat p)%Z.
+apply Zplus_lt_le_compat.
+rewrite 2!Zsum_digit_digit.
+apply IHp.
+now apply Zle_succ_le.
+rewrite Zabs_Zmult.
+rewrite (Zabs_eq (beta ^ Z_of_nat p)) by apply Zpower_ge_0.
+apply Zmult_le_compat_r. 2: apply Zpower_ge_0.
+apply Zlt_succ_le.
+assert (forall u v, Zabs (Zdigit u v) < Zsucc (beta - 1))%Z.
+clear ; intros n k.
+assert (0 < beta)%Z.
+apply Zlt_le_trans with 2%Z.
+apply refl_equal.
+apply Zle_bool_imp_le.
+apply beta.
+replace (Zsucc (beta - 1)) with (Zabs beta).
+apply ZOmod_lt.
+now apply Zgt_not_eq.
+rewrite Zabs_eq.
+apply Zsucc_pred.
+now apply Zlt_le_weak.
+assert (0 <= Z_of_nat p < n)%Z.
+split.
+apply Zle_0_nat.
+apply Zgt_lt.
+now apply Zle_succ_gt.
+destruct (Hd (Z_of_nat p) H0) as [K|K] ; rewrite K.
+apply H.
+rewrite Zplus_0_r.
+apply H.
+unfold Zsucc.
+ring_simplify.
+rewrite Zpower_plus.
+change (beta ^1)%Z with (beta * 1)%Z.
+now rewrite Zmult_1_r.
+apply Zle_0_nat.
+easy.
+destruct n as [|n|n] ; try easy.
+now rewrite 3!ZOmod_0_r.
+Qed.
+
+Theorem ZOdiv_plus_pow_digit :
+ forall u v n, (0 <= u * v)%Z ->
+ (forall k, (0 <= k < n)%Z -> Zdigit u k = Z0 \/ Zdigit v k = Z0) ->
+ ZOdiv (u + v) (Zpower beta n) = (ZOdiv u (Zpower beta n) + ZOdiv v (Zpower beta n))%Z.
+Proof.
+intros u v n Huv Hd.
+rewrite <- (Zplus_0_r (ZOdiv u (Zpower beta n) + ZOdiv v (Zpower beta n))).
+rewrite ZOdiv_plus with (1 := Huv).
+rewrite <- ZOmod_plus_pow_digit by assumption.
+apply f_equal.
+destruct (Zle_or_lt 0 n) as [Hn|Hn].
+apply ZOdiv_small_abs.
+rewrite <- Zabs_eq.
+apply ZOmod_lt.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+apply Zpower_ge_0.
+clear -Hn.
+destruct n as [|n|n] ; try easy.
+apply ZOdiv_0_r.
+Qed.
+
+Theorem Zdigit_plus :
+ forall u v, (0 <= u * v)%Z ->
+ (forall k, (0 <= k)%Z -> Zdigit u k = Z0 \/ Zdigit v k = Z0) ->
+ forall k,
+ Zdigit (u + v) k = (Zdigit u k + Zdigit v k)%Z.
+Proof.
+intros u v Huv Hd k.
+destruct (Zle_or_lt 0 k) as [Hk|Hk].
+unfold Zdigit.
+rewrite ZOdiv_plus_pow_digit with (1 := Huv).
+rewrite <- (Zmult_1_r beta) at 3 5 7.
+change (beta * 1)%Z with (beta ^1)%Z.
+apply ZOmod_plus_pow_digit.
+apply Zsame_sign_trans_weak with v.
+intros Zv ; rewrite Zv.
+apply ZOdiv_0_l.
+rewrite Zmult_comm.
+apply Zsame_sign_trans_weak with u.
+intros Zu ; rewrite Zu.
+apply ZOdiv_0_l.
+now rewrite Zmult_comm.
+apply Zsame_sign_odiv.
+apply Zpower_ge_0.
+apply Zsame_sign_odiv.
+apply Zpower_ge_0.
+intros k' (Hk1,Hk2).
+rewrite 2!Zdigit_div_pow by assumption.
+apply Hd.
+now apply Zplus_le_0_compat.
+intros k' (Hk1,Hk2).
+now apply Hd.
+now rewrite 3!Zdigit_lt.
+Qed.
+
+Definition Zscale n k :=
+ if Zle_bool 0 k then (n * Zpower beta k)%Z else ZOdiv n (Zpower beta (-k)).
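+(* Editorial illustration, not part of the original patch: [Zscale n k] shifts
+   [n] by [k] digits, multiplying by [beta ^ k] when [k] is non-negative and
+   dividing (truncated toward zero) otherwise; with a radix of 10,
+   [Zscale 123 2 = 12300] and [Zscale 123 (-1) = 12]. *)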
+
+Theorem Zdigit_scale :
+ forall n k k', (0 <= k')%Z ->
+ Zdigit (Zscale n k) k' = Zdigit n (k' - k).
+Proof.
+intros n k k' Hk'.
+unfold Zscale.
+case Zle_bool_spec ; intros Hk.
+now apply Zdigit_mul_pow.
+apply Zdigit_div_pow with (1 := Hk').
+omega.
+Qed.
+
+Theorem Zscale_0 :
+ forall k,
+ Zscale 0 k = Z0.
+Proof.
+intros k.
+unfold Zscale.
+case Zle_bool.
+apply Zmult_0_l.
+apply ZOdiv_0_l.
+Qed.
+
+Theorem Zsame_sign_scale :
+ forall n k,
+ (0 <= n * Zscale n k)%Z.
+Proof.
+intros n k.
+unfold Zscale.
+case Zle_bool_spec ; intros Hk.
+rewrite Zmult_assoc.
+apply Zmult_le_0_compat.
+apply Zsame_sign_imp ; apply Zlt_le_weak.
+apply Zpower_ge_0.
+apply Zsame_sign_odiv.
+apply Zpower_ge_0.
+Qed.
+
+Theorem Zscale_mul_pow :
+ forall n k k', (0 <= k)%Z ->
+ Zscale (n * Zpower beta k) k' = Zscale n (k + k').
+Proof.
+intros n k k' Hk.
+unfold Zscale.
+case Zle_bool_spec ; intros Hk'.
+rewrite Zle_bool_true.
+rewrite <- Zmult_assoc.
+apply f_equal.
+now rewrite Zpower_plus.
+now apply Zplus_le_0_compat.
+case Zle_bool_spec ; intros Hk''.
+pattern k at 1 ; replace k with (k + k' + -k')%Z by ring.
+assert (0 <= -k')%Z by omega.
+rewrite Zpower_plus by easy.
+rewrite Zmult_assoc, ZO_div_mult.
+apply refl_equal.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+replace (-k')%Z with (-(k+k') + k)%Z by ring.
+rewrite Zpower_plus with (2 := Hk).
+apply ZOdiv_mult_cancel_r.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+omega.
+Qed.
+
+Theorem Zscale_scale :
+ forall n k k', (0 <= k)%Z ->
+ Zscale (Zscale n k) k' = Zscale n (k + k').
+Proof.
+intros n k k' Hk.
+unfold Zscale at 2.
+rewrite Zle_bool_true with (1 := Hk).
+now apply Zscale_mul_pow.
+Qed.
+
+Definition Zslice n k1 k2 :=
+ if Zle_bool 0 k2 then ZOmod (Zscale n (-k1)) (Zpower beta k2) else Z0.
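+(* Editorial illustration, not part of the original patch: [Zslice n k1 k2]
+   extracts the [k2] digits of [n] starting at position [k1]; with a radix of
+   10, [Zslice 123456 1 3 = 345], in accordance with [Zdigit_slice] below. *)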
+
+Theorem Zdigit_slice :
+ forall n k1 k2 k, (0 <= k < k2)%Z ->
+ Zdigit (Zslice n k1 k2) k = Zdigit n (k1 + k).
+Proof.
+intros n k1 k2 k Hk.
+unfold Zslice.
+rewrite Zle_bool_true.
+rewrite Zdigit_mod_pow by apply Hk.
+rewrite Zdigit_scale by apply Hk.
+unfold Zminus.
+now rewrite Zopp_involutive, Zplus_comm.
+omega.
+Qed.
+
+Theorem Zdigit_slice_out :
+ forall n k1 k2 k, (k2 <= k)%Z ->
+ Zdigit (Zslice n k1 k2) k = Z0.
+Proof.
+intros n k1 k2 k Hk.
+unfold Zslice.
+case Zle_bool_spec ; intros Hk2.
+apply Zdigit_mod_pow_out.
+now split.
+apply Zdigit_0.
+Qed.
+
+Theorem Zslice_0 :
+ forall k k',
+ Zslice 0 k k' = Z0.
+Proof.
+intros k k'.
+unfold Zslice.
+case Zle_bool.
+rewrite Zscale_0.
+apply ZOmod_0_l.
+apply refl_equal.
+Qed.
+
+Theorem Zsame_sign_slice :
+ forall n k k',
+ (0 <= n * Zslice n k k')%Z.
+Proof.
+intros n k k'.
+unfold Zslice.
+case Zle_bool.
+apply Zsame_sign_trans_weak with (Zscale n (-k)).
+intros H ; rewrite H.
+apply ZOmod_0_l.
+apply Zsame_sign_scale.
+rewrite Zmult_comm.
+apply ZOmod_sgn2.
+now rewrite Zmult_0_r.
+Qed.
+
+Theorem Zslice_slice :
+ forall n k1 k2 k1' k2', (0 <= k1' <= k2)%Z ->
+ Zslice (Zslice n k1 k2) k1' k2' = Zslice n (k1 + k1') (Zmin (k2 - k1') k2').
+Proof.
+intros n k1 k2 k1' k2' Hk1'.
+destruct (Zle_or_lt 0 k2') as [Hk2'|Hk2'].
+apply Zdigit_ext.
+intros k Hk.
+destruct (Zle_or_lt (Zmin (k2 - k1') k2') k) as [Hk'|Hk'].
+rewrite (Zdigit_slice_out n (k1 + k1')) with (1 := Hk').
+destruct (Zle_or_lt k2' k) as [Hk''|Hk''].
+now apply Zdigit_slice_out.
+rewrite Zdigit_slice by now split.
+apply Zdigit_slice_out.
+zify ; omega.
+rewrite Zdigit_slice by (zify ; omega).
+rewrite (Zdigit_slice n (k1 + k1')) by now split.
+rewrite Zdigit_slice.
+now rewrite Zplus_assoc.
+zify ; omega.
+unfold Zslice.
+rewrite Zmin_r.
+now rewrite Zle_bool_false.
+omega.
+Qed.
+
+Theorem Zslice_mul_pow :
+ forall n k k1 k2, (0 <= k)%Z ->
+ Zslice (n * Zpower beta k) k1 k2 = Zslice n (k1 - k) k2.
+Proof.
+intros n k k1 k2 Hk.
+unfold Zslice.
+case Zle_bool_spec ; intros Hk2.
+2: apply refl_equal.
+rewrite Zscale_mul_pow with (1 := Hk).
+now replace (- (k1 - k))%Z with (k + -k1)%Z by ring.
+Qed.
+
+Theorem Zslice_div_pow :
+ forall n k k1 k2, (0 <= k)%Z -> (0 <= k1)%Z ->
+ Zslice (ZOdiv n (Zpower beta k)) k1 k2 = Zslice n (k1 + k) k2.
+Proof.
+intros n k k1 k2 Hk Hk1.
+unfold Zslice.
+case Zle_bool_spec ; intros Hk2.
+2: apply refl_equal.
+apply (f_equal (fun x => ZOmod x (beta ^ k2))).
+unfold Zscale.
+case Zle_bool_spec ; intros Hk1'.
+replace k1 with Z0 by omega.
+case Zle_bool_spec ; intros Hk'.
+replace k with Z0 by omega.
+simpl.
+now rewrite ZOdiv_1_r.
+rewrite Zopp_involutive.
+apply Zmult_1_r.
+rewrite Zle_bool_false by omega.
+rewrite 2!Zopp_involutive, Zplus_comm.
+rewrite Zpower_plus by assumption.
+apply ZOdiv_ZOdiv.
+Qed.
+
+Theorem Zslice_scale :
+ forall n k k1 k2, (0 <= k1)%Z ->
+ Zslice (Zscale n k) k1 k2 = Zslice n (k1 - k) k2.
+Proof.
+intros n k k1 k2 Hk1.
+unfold Zscale.
+case Zle_bool_spec; intros Hk.
+now apply Zslice_mul_pow.
+apply Zslice_div_pow with (2 := Hk1).
+omega.
+Qed.
+
+Theorem Zslice_div_pow_scale :
+ forall n k k1 k2, (0 <= k)%Z ->
+ Zslice (ZOdiv n (Zpower beta k)) k1 k2 = Zscale (Zslice n k (k1 + k2)) (-k1).
+Proof.
+intros n k k1 k2 Hk.
+apply Zdigit_ext.
+intros k' Hk'.
+rewrite Zdigit_scale with (1 := Hk').
+unfold Zminus.
+rewrite (Zplus_comm k'), Zopp_involutive.
+destruct (Zle_or_lt k2 k') as [Hk2|Hk2].
+rewrite Zdigit_slice_out with (1 := Hk2).
+apply sym_eq.
+apply Zdigit_slice_out.
+now apply Zplus_le_compat_l.
+rewrite Zdigit_slice by now split.
+destruct (Zle_or_lt 0 (k1 + k')) as [Hk1'|Hk1'].
+rewrite Zdigit_slice by omega.
+rewrite Zdigit_div_pow by assumption.
+apply f_equal.
+ring.
+now rewrite 2!Zdigit_lt.
+Qed.
+
+Theorem Zplus_slice :
+ forall n k l1 l2, (0 <= l1)%Z -> (0 <= l2)%Z ->
+ (Zslice n k l1 + Zscale (Zslice n (k + l1) l2) l1)%Z = Zslice n k (l1 + l2).
+Proof.
+intros n k1 l1 l2 Hl1 Hl2.
+clear Hl1.
+apply Zdigit_ext.
+intros k Hk.
+rewrite Zdigit_plus.
+rewrite Zdigit_scale with (1 := Hk).
+destruct (Zle_or_lt (l1 + l2) k) as [Hk2|Hk2].
+rewrite Zdigit_slice_out with (1 := Hk2).
+now rewrite 2!Zdigit_slice_out by omega.
+rewrite Zdigit_slice with (1 := conj Hk Hk2).
+destruct (Zle_or_lt l1 k) as [Hk1|Hk1].
+rewrite Zdigit_slice_out with (1 := Hk1).
+rewrite Zdigit_slice by omega.
+simpl ; apply f_equal.
+ring.
+rewrite Zdigit_slice with (1 := conj Hk Hk1).
+rewrite (Zdigit_lt _ (k - l1)) by omega.
+apply Zplus_0_r.
+rewrite Zmult_comm.
+apply Zsame_sign_trans_weak with n.
+intros H ; rewrite H.
+apply Zslice_0.
+rewrite Zmult_comm.
+apply Zsame_sign_trans_weak with (Zslice n (k1 + l1) l2).
+intros H ; rewrite H.
+apply Zscale_0.
+apply Zsame_sign_slice.
+apply Zsame_sign_scale.
+apply Zsame_sign_slice.
+clear k Hk ; intros k Hk.
+rewrite Zdigit_scale with (1 := Hk).
+destruct (Zle_or_lt l1 k) as [Hk1|Hk1].
+left.
+now apply Zdigit_slice_out.
+right.
+apply Zdigit_lt.
+omega.
+Qed.
+
+Section digits_aux.
+
+Variable p : Z.
+Hypothesis Hp : (0 <= p)%Z.
+
+Fixpoint Zdigits_aux (nb pow : Z) (n : nat) { struct n } : Z :=
+ match n with
+ | O => nb
+ | S n => if Zlt_bool p pow then nb else Zdigits_aux (nb + 1) (Zmult beta pow) n
+ end.
+
+End digits_aux.
+
+(** Number of digits of an integer *)
+Definition Zdigits n :=
+ match n with
+ | Z0 => Z0
+ | Zneg p => Zdigits_aux (Zpos p) 1 beta (digits2_Pnat p)
+ | Zpos p => Zdigits_aux n 1 beta (digits2_Pnat p)
+ end.
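+(* Editorial illustration, not part of the original patch: [Zdigits n] is the
+   number of radix-[beta] digits of [n], computed with [digits2_Pnat] as fuel;
+   with a radix of 10, [Zdigits 2345 = 4], [Zdigits (-7) = 1] and
+   [Zdigits 0 = 0], in accordance with [Zdigits_correct] below. *)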
+
+Theorem Zdigits_correct :
+ forall n,
+ (Zpower beta (Zdigits n - 1) <= Zabs n < Zpower beta (Zdigits n))%Z.
+Proof.
+cut (forall p, Zpower beta (Zdigits (Zpos p) - 1) <= Zpos p < Zpower beta (Zdigits (Zpos p)))%Z.
+intros H [|n|n] ; try exact (H n).
+now split.
+intros n.
+simpl.
+(* *)
+assert (U: (Zpos n < Zpower beta (Z_of_nat (S (digits2_Pnat n))))%Z).
+apply Zlt_le_trans with (1 := proj2 (digits2_Pnat_correct n)).
+rewrite Zpower_Zpower_nat.
+rewrite Zabs_nat_Z_of_nat.
+induction (S (digits2_Pnat n)).
+easy.
+rewrite 2!(Zpower_nat_S).
+apply Zmult_le_compat with (2 := IHn0).
+apply Zle_bool_imp_le.
+apply beta.
+easy.
+rewrite <- (Zabs_nat_Z_of_nat n0).
+rewrite <- Zpower_Zpower_nat.
+apply (Zpower_ge_0 (Build_radix 2 (refl_equal true))).
+apply Zle_0_nat.
+apply Zle_0_nat.
+(* *)
+revert U.
+rewrite inj_S.
+unfold Zsucc.
+generalize (digits2_Pnat n).
+intros u U.
+pattern (radix_val beta) at 2 4 ; replace (radix_val beta) with (Zpower beta 1) by apply Zmult_1_r.
+assert (V: (Zpower beta (1 - 1) <= Zpos n)%Z).
+now apply (Zlt_le_succ 0).
+generalize (conj V U).
+clear.
+generalize (Zle_refl 1).
+generalize 1%Z at 2 3 5 6 7 9 10.
+(* *)
+induction u.
+easy.
+rewrite inj_S; unfold Zsucc.
+simpl Zdigits_aux.
+intros v Hv U.
+case Zlt_bool_spec ; intros K.
+now split.
+pattern (radix_val beta) at 2 5 ; replace (radix_val beta) with (Zpower beta 1) by apply Zmult_1_r.
+rewrite <- Zpower_plus.
+rewrite Zplus_comm.
+apply IHu.
+clear -Hv ; omega.
+split.
+now ring_simplify (1 + v - 1)%Z.
+now rewrite Zplus_assoc.
+easy.
+apply Zle_succ_le with (1 := Hv).
+Qed.
+
+Theorem Zdigits_abs :
+ forall n, Zdigits (Zabs n) = Zdigits n.
+Proof.
+now intros [|n|n].
+Qed.
+
+Theorem Zdigits_gt_0 :
+ forall n, n <> Z0 -> (0 < Zdigits n)%Z.
+Proof.
+intros n Zn.
+rewrite <- (Zdigits_abs n).
+assert (Hn: (0 < Zabs n)%Z).
+destruct n ; try easy.
+now elim Zn.
+destruct (Zabs n) as [|p|p] ; try easy ; clear.
+simpl.
+generalize 1%Z (radix_val beta) (refl_equal Lt : (0 < 1)%Z).
+induction (digits2_Pnat p).
+easy.
+simpl.
+intros.
+case Zlt_bool.
+exact H.
+apply IHn.
+now apply Zlt_lt_succ.
+Qed.
+
+Theorem Zdigits_ge_0 :
+ forall n, (0 <= Zdigits n)%Z.
+Proof.
+intros n.
+destruct (Z_eq_dec n 0) as [H|H].
+now rewrite H.
+apply Zlt_le_weak.
+now apply Zdigits_gt_0.
+Qed.
+
+Theorem Zdigit_out :
+ forall n k, (Zdigits n <= k)%Z ->
+ Zdigit n k = Z0.
+Proof.
+intros n k Hk.
+apply Zdigit_ge_Zpower with (2 := Hk).
+apply Zdigits_correct.
+Qed.
+
+Theorem Zdigit_digits :
+ forall n, n <> Z0 ->
+ Zdigit n (Zdigits n - 1) <> Z0.
+Proof.
+intros n Zn.
+apply Zdigit_not_0.
+apply Zlt_0_le_0_pred.
+now apply Zdigits_gt_0.
+ring_simplify (Zdigits n - 1 + 1)%Z.
+apply Zdigits_correct.
+Qed.
+
+Theorem Zdigits_slice :
+ forall n k l, (0 <= l)%Z ->
+ (Zdigits (Zslice n k l) <= l)%Z.
+Proof.
+intros n k l Hl.
+unfold Zslice.
+rewrite Zle_bool_true with (1 := Hl).
+destruct (Zdigits_correct (ZOmod (Zscale n (- k)) (Zpower beta l))) as (H1,H2).
+apply Zpower_lt_Zpower with beta.
+apply Zle_lt_trans with (1 := H1).
+rewrite <- (Zabs_eq (beta ^ l)) at 2 by apply Zpower_ge_0.
+apply ZOmod_lt.
+apply Zgt_not_eq.
+now apply Zpower_gt_0.
+Qed.
+
+End Fcore_digits.
diff --git a/flocq/Core/Fcore_float_prop.v b/flocq/Core/Fcore_float_prop.v
new file mode 100644
index 0000000..746f7a6
--- /dev/null
+++ b/flocq/Core/Fcore_float_prop.v
@@ -0,0 +1,488 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Basic properties of floating-point formats: lemmas about mantissa, exponent... *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+
+Section Float_prop.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Theorem Rcompare_F2R :
+ forall e m1 m2 : Z,
+ Rcompare (F2R (Float beta m1 e)) (F2R (Float beta m2 e)) = Zcompare m1 m2.
+Proof.
+intros e m1 m2.
+unfold F2R. simpl.
+rewrite Rcompare_mult_r.
+apply Rcompare_Z2R.
+apply bpow_gt_0.
+Qed.
+
+(** Basic facts *)
+Theorem F2R_le_reg :
+ forall e m1 m2 : Z,
+ (F2R (Float beta m1 e) <= F2R (Float beta m2 e))%R ->
+ (m1 <= m2)%Z.
+Proof.
+intros e m1 m2 H.
+apply le_Z2R.
+apply Rmult_le_reg_r with (bpow e).
+apply bpow_gt_0.
+exact H.
+Qed.
+
+Theorem F2R_le_compat :
+ forall m1 m2 e : Z,
+ (m1 <= m2)%Z ->
+ (F2R (Float beta m1 e) <= F2R (Float beta m2 e))%R.
+Proof.
+intros m1 m2 e H.
+unfold F2R. simpl.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+now apply Z2R_le.
+Qed.
+
+Theorem F2R_lt_reg :
+ forall e m1 m2 : Z,
+ (F2R (Float beta m1 e) < F2R (Float beta m2 e))%R ->
+ (m1 < m2)%Z.
+Proof.
+intros e m1 m2 H.
+apply lt_Z2R.
+apply Rmult_lt_reg_r with (bpow e).
+apply bpow_gt_0.
+exact H.
+Qed.
+
+Theorem F2R_lt_compat :
+ forall e m1 m2 : Z,
+ (m1 < m2)%Z ->
+ (F2R (Float beta m1 e) < F2R (Float beta m2 e))%R.
+Proof.
+intros e m1 m2 H.
+unfold F2R. simpl.
+apply Rmult_lt_compat_r.
+apply bpow_gt_0.
+now apply Z2R_lt.
+Qed.
+
+Theorem F2R_eq_compat :
+ forall e m1 m2 : Z,
+ (m1 = m2)%Z ->
+ (F2R (Float beta m1 e) = F2R (Float beta m2 e))%R.
+Proof.
+intros e m1 m2 H.
+now apply (f_equal (fun m => F2R (Float beta m e))).
+Qed.
+
+Theorem F2R_eq_reg :
+ forall e m1 m2 : Z,
+ F2R (Float beta m1 e) = F2R (Float beta m2 e) ->
+ m1 = m2.
+Proof.
+intros e m1 m2 H.
+apply Zle_antisym ;
+ apply F2R_le_reg with e ;
+ rewrite H ;
+ apply Rle_refl.
+Qed.
+
+Theorem F2R_Zabs:
+ forall m e : Z,
+ F2R (Float beta (Zabs m) e) = Rabs (F2R (Float beta m e)).
+Proof.
+intros m e.
+unfold F2R.
+rewrite Rabs_mult.
+rewrite <- Z2R_abs.
+simpl.
+apply f_equal.
+apply sym_eq; apply Rabs_right.
+apply Rle_ge.
+apply bpow_ge_0.
+Qed.
+
+Theorem F2R_Zopp :
+ forall m e : Z,
+ F2R (Float beta (Zopp m) e) = Ropp (F2R (Float beta m e)).
+Proof.
+intros m e.
+unfold F2R. simpl.
+rewrite <- Ropp_mult_distr_l_reverse.
+now rewrite Z2R_opp.
+Qed.
+
+(** Sign facts *)
+Theorem F2R_0 :
+ forall e : Z,
+ F2R (Float beta 0 e) = R0.
+Proof.
+intros e.
+unfold F2R. simpl.
+apply Rmult_0_l.
+Qed.
+
+Theorem F2R_eq_0_reg :
+ forall m e : Z,
+ F2R (Float beta m e) = R0 ->
+ m = Z0.
+Proof.
+intros m e H.
+apply F2R_eq_reg with e.
+now rewrite F2R_0.
+Qed.
+
+Theorem F2R_ge_0_reg :
+ forall m e : Z,
+ (0 <= F2R (Float beta m e))%R ->
+ (0 <= m)%Z.
+Proof.
+intros m e H.
+apply F2R_le_reg with e.
+now rewrite F2R_0.
+Qed.
+
+Theorem F2R_le_0_reg :
+ forall m e : Z,
+ (F2R (Float beta m e) <= 0)%R ->
+ (m <= 0)%Z.
+Proof.
+intros m e H.
+apply F2R_le_reg with e.
+now rewrite F2R_0.
+Qed.
+
+Theorem F2R_gt_0_reg :
+ forall m e : Z,
+ (0 < F2R (Float beta m e))%R ->
+ (0 < m)%Z.
+Proof.
+intros m e H.
+apply F2R_lt_reg with e.
+now rewrite F2R_0.
+Qed.
+
+Theorem F2R_lt_0_reg :
+ forall m e : Z,
+ (F2R (Float beta m e) < 0)%R ->
+ (m < 0)%Z.
+Proof.
+intros m e H.
+apply F2R_lt_reg with e.
+now rewrite F2R_0.
+Qed.
+
+Theorem F2R_ge_0_compat :
+ forall f : float beta,
+ (0 <= Fnum f)%Z ->
+ (0 <= F2R f)%R.
+Proof.
+intros f H.
+rewrite <- F2R_0 with (Fexp f).
+now apply F2R_le_compat.
+Qed.
+
+Theorem F2R_le_0_compat :
+ forall f : float beta,
+ (Fnum f <= 0)%Z ->
+ (F2R f <= 0)%R.
+Proof.
+intros f H.
+rewrite <- F2R_0 with (Fexp f).
+now apply F2R_le_compat.
+Qed.
+
+Theorem F2R_gt_0_compat :
+ forall f : float beta,
+ (0 < Fnum f)%Z ->
+ (0 < F2R f)%R.
+Proof.
+intros f H.
+rewrite <- F2R_0 with (Fexp f).
+now apply F2R_lt_compat.
+Qed.
+
+Theorem F2R_lt_0_compat :
+ forall f : float beta,
+ (Fnum f < 0)%Z ->
+ (F2R f < 0)%R.
+Proof.
+intros f H.
+rewrite <- F2R_0 with (Fexp f).
+now apply F2R_lt_compat.
+Qed.
+
+(** Floats and bpow *)
+Theorem F2R_bpow :
+ forall e : Z,
+ F2R (Float beta 1 e) = bpow e.
+Proof.
+intros e.
+unfold F2R. simpl.
+apply Rmult_1_l.
+Qed.
+
+Theorem bpow_le_F2R :
+ forall m e : Z,
+ (0 < m)%Z ->
+ (bpow e <= F2R (Float beta m e))%R.
+Proof.
+intros m e H.
+rewrite <- F2R_bpow.
+apply F2R_le_compat.
+now apply (Zlt_le_succ 0).
+Qed.
+
+Theorem F2R_p1_le_bpow :
+ forall m e1 e2 : Z,
+ (0 < m)%Z ->
+ (F2R (Float beta m e1) < bpow e2)%R ->
+ (F2R (Float beta (m + 1) e1) <= bpow e2)%R.
+Proof.
+intros m e1 e2 Hm.
+intros H.
+assert (He : (e1 <= e2)%Z).
+(* . *)
+apply (le_bpow beta).
+apply Rle_trans with (F2R (Float beta m e1)).
+unfold F2R. simpl.
+rewrite <- (Rmult_1_l (bpow e1)) at 1.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply (Z2R_le 1).
+now apply (Zlt_le_succ 0).
+now apply Rlt_le.
+(* . *)
+revert H.
+replace e2 with (e2 - e1 + e1)%Z by ring.
+rewrite bpow_plus.
+unfold F2R. simpl.
+rewrite <- (Z2R_Zpower beta (e2 - e1)).
+intros H.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Rmult_lt_reg_r in H.
+apply Z2R_le.
+apply Zlt_le_succ.
+now apply lt_Z2R.
+apply bpow_gt_0.
+now apply Zle_minus_le_0.
+Qed.
+
+Theorem bpow_le_F2R_m1 :
+ forall m e1 e2 : Z,
+ (1 < m)%Z ->
+ (bpow e2 < F2R (Float beta m e1))%R ->
+ (bpow e2 <= F2R (Float beta (m - 1) e1))%R.
+Proof.
+intros m e1 e2 Hm.
+case (Zle_or_lt e1 e2); intros He.
+replace e2 with (e2 - e1 + e1)%Z by ring.
+rewrite bpow_plus.
+unfold F2R. simpl.
+rewrite <- (Z2R_Zpower beta (e2 - e1)).
+intros H.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Rmult_lt_reg_r in H.
+apply Z2R_le.
+rewrite (Zpred_succ (Zpower _ _)).
+apply Zplus_le_compat_r.
+apply Zlt_le_succ.
+now apply lt_Z2R.
+apply bpow_gt_0.
+now apply Zle_minus_le_0.
+intros H.
+apply Rle_trans with (1*bpow e1)%R.
+rewrite Rmult_1_l.
+apply bpow_le.
+now apply Zlt_le_weak.
+unfold F2R. simpl.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+replace 1%R with (Z2R 1) by reflexivity.
+apply Z2R_le.
+omega.
+Qed.
+
+Theorem F2R_lt_bpow :
+ forall f : float beta, forall e',
+ (Zabs (Fnum f) < Zpower beta (e' - Fexp f))%Z ->
+ (Rabs (F2R f) < bpow e')%R.
+Proof.
+intros (m, e) e' Hm.
+rewrite <- F2R_Zabs.
+destruct (Zle_or_lt e e') as [He|He].
+unfold F2R. simpl.
+apply Rmult_lt_reg_r with (bpow (-e)).
+apply bpow_gt_0.
+rewrite Rmult_assoc, <- 2!bpow_plus, Zplus_opp_r, Rmult_1_r.
+rewrite <-Z2R_Zpower. 2: now apply Zle_left.
+now apply Z2R_lt.
+elim Zlt_not_le with (1 := Hm).
+simpl.
+cut (e' - e < 0)%Z. 2: omega.
+clear.
+case (e' - e)%Z ; try easy.
+intros p _.
+apply Zabs_pos.
+Qed.
+
+Theorem F2R_change_exp :
+ forall e' m e : Z,
+ (e' <= e)%Z ->
+ F2R (Float beta m e) = F2R (Float beta (m * Zpower beta (e - e')) e').
+Proof.
+intros e' m e He.
+unfold F2R. simpl.
+rewrite Z2R_mult, Z2R_Zpower, Rmult_assoc.
+apply f_equal.
+pattern e at 1 ; replace e with (e - e' + e')%Z by ring.
+apply bpow_plus.
+now apply Zle_minus_le_0.
+Qed.
+
+Theorem F2R_prec_normalize :
+ forall m e e' p : Z,
+ (Zabs m < Zpower beta p)%Z ->
+ (bpow (e' - 1)%Z <= Rabs (F2R (Float beta m e)))%R ->
+ F2R (Float beta m e) = F2R (Float beta (m * Zpower beta (e - e' + p)) (e' - p)).
+Proof.
+intros m e e' p Hm Hf.
+assert (Hp: (0 <= p)%Z).
+destruct p ; try easy.
+now elim (Zle_not_lt _ _ (Zabs_pos m)).
+(* . *)
+replace (e - e' + p)%Z with (e - (e' - p))%Z by ring.
+apply F2R_change_exp.
+cut (e' - 1 < e + p)%Z. omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (1 := Hf).
+rewrite <- F2R_Zabs, Zplus_comm, bpow_plus.
+apply Rmult_lt_compat_r.
+apply bpow_gt_0.
+rewrite <- Z2R_Zpower.
+now apply Z2R_lt.
+exact Hp.
+Qed.
+
+(** Floats and ln_beta *)
+Theorem ln_beta_F2R_bounds :
+ forall x m e, (0 < m)%Z ->
+ (F2R (Float beta m e) <= x < F2R (Float beta (m + 1) e))%R ->
+ ln_beta beta x = ln_beta beta (F2R (Float beta m e)) :> Z.
+Proof.
+intros x m e Hp (Hx,Hx2).
+destruct (ln_beta beta (F2R (Float beta m e))) as (ex, He).
+simpl.
+apply ln_beta_unique.
+assert (Hp1: (0 < F2R (Float beta m e))%R).
+now apply F2R_gt_0_compat.
+specialize (He (Rgt_not_eq _ _ Hp1)).
+rewrite Rabs_pos_eq in He. 2: now apply Rlt_le.
+destruct He as (He1, He2).
+assert (Hx1: (0 < x)%R).
+now apply Rlt_le_trans with (2 := Hx).
+rewrite Rabs_pos_eq. 2: now apply Rlt_le.
+split.
+now apply Rle_trans with (1 := He1).
+apply Rlt_le_trans with (1 := Hx2).
+now apply F2R_p1_le_bpow.
+Qed.
+
+Theorem ln_beta_F2R :
+ forall m e : Z,
+ m <> Z0 ->
+ (ln_beta beta (F2R (Float beta m e)) = ln_beta beta (Z2R m) + e :> Z)%Z.
+Proof.
+intros m e H.
+unfold F2R ; simpl.
+apply ln_beta_mult_bpow.
+exact (Z2R_neq m 0 H).
+Qed.
+
+Theorem float_distribution_pos :
+ forall m1 e1 m2 e2 : Z,
+ (0 < m1)%Z ->
+ (F2R (Float beta m1 e1) < F2R (Float beta m2 e2) < F2R (Float beta (m1 + 1) e1))%R ->
+ (e2 < e1)%Z /\ (e1 + ln_beta beta (Z2R m1) = e2 + ln_beta beta (Z2R m2))%Z.
+Proof.
+intros m1 e1 m2 e2 Hp1 (H12, H21).
+assert (He: (e2 < e1)%Z).
+(* . *)
+apply Znot_ge_lt.
+intros H0.
+elim Rlt_not_le with (1 := H21).
+apply Zge_le in H0.
+apply (F2R_change_exp e1 m2 e2) in H0.
+rewrite H0.
+apply F2R_le_compat.
+apply Zlt_le_succ.
+apply (F2R_lt_reg e1).
+now rewrite <- H0.
+(* . *)
+split.
+exact He.
+rewrite (Zplus_comm e1), (Zplus_comm e2).
+assert (Hp2: (0 < m2)%Z).
+apply (F2R_gt_0_reg m2 e2).
+apply Rlt_trans with (2 := H12).
+now apply F2R_gt_0_compat.
+rewrite <- 2!ln_beta_F2R.
+destruct (ln_beta beta (F2R (Float beta m1 e1))) as (e1', H1).
+simpl.
+apply sym_eq.
+apply ln_beta_unique.
+assert (H2 : (bpow (e1' - 1) <= F2R (Float beta m1 e1) < bpow e1')%R).
+rewrite <- (Zabs_eq m1), F2R_Zabs.
+apply H1.
+apply Rgt_not_eq.
+apply Rlt_gt.
+now apply F2R_gt_0_compat.
+now apply Zlt_le_weak.
+clear H1.
+rewrite <- F2R_Zabs, Zabs_eq.
+split.
+apply Rlt_le.
+apply Rle_lt_trans with (2 := H12).
+apply H2.
+apply Rlt_le_trans with (1 := H21).
+now apply F2R_p1_le_bpow.
+now apply Zlt_le_weak.
+apply sym_not_eq.
+now apply Zlt_not_eq.
+apply sym_not_eq.
+now apply Zlt_not_eq.
+Qed.
+
+Theorem F2R_cond_Zopp :
+ forall b m e,
+ F2R (Float beta (cond_Zopp b m) e) = cond_Ropp b (F2R (Float beta m e)).
+Proof.
+intros [|] m e ; unfold F2R ; simpl.
+now rewrite Z2R_opp, Ropp_mult_distr_l_reverse.
+apply refl_equal.
+Qed.
+
+End Float_prop.
diff --git a/flocq/Core/Fcore_generic_fmt.v b/flocq/Core/Fcore_generic_fmt.v
new file mode 100644
index 0000000..b1db47c
--- /dev/null
+++ b/flocq/Core/Fcore_generic_fmt.v
@@ -0,0 +1,2232 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * What it means for a real number to belong to a format, and many related properties. *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_float_prop.
+
+Section Generic.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Section Format.
+
+Variable fexp : Z -> Z.
+
+(** Requirements on the exponent function [fexp] for it to describe a well-behaved format *)
+
+Class Valid_exp :=
+ valid_exp :
+ forall k : Z,
+ ( (fexp k < k)%Z -> (fexp (k + 1) <= k)%Z ) /\
+ ( (k <= fexp k)%Z ->
+ (fexp (fexp k + 1) <= fexp k)%Z /\
+ forall l : Z, (l <= fexp k)%Z -> fexp l = fexp k ).
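+
+(** Informally: if [k] is in the "normal" range ([fexp k < k]) then
+    [fexp (k + 1) <= k], which for instance guarantees that [bpow k] is
+    representable (see [generic_format_bpow] below); and if [k] is in the
+    "small" range ([k <= fexp k]) then [fexp] takes the same value on every
+    integer below [fexp k].  As an illustration, an FLT-style exponent
+    function [fun e => Zmax (e - prec) emin] satisfies these constraints
+    as soon as [0 < prec]. *)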
+
+Context { valid_exp_ : Valid_exp }.
+
+Theorem valid_exp_large :
+ forall k l,
+ (fexp k < k)%Z -> (k <= l)%Z ->
+ (fexp l < l)%Z.
+Proof.
+intros k l Hk H.
+apply Znot_ge_lt.
+intros Hl.
+apply Zge_le in Hl.
+assert (H' := proj2 (proj2 (valid_exp l) Hl) k).
+omega.
+Qed.
+
+Theorem valid_exp_large' :
+ forall k l,
+ (fexp k < k)%Z -> (l <= k)%Z ->
+ (fexp l < k)%Z.
+Proof.
+intros k l Hk H.
+apply Znot_ge_lt.
+intros H'.
+apply Zge_le in H'.
+assert (Hl := Zle_trans _ _ _ H H').
+apply valid_exp in Hl.
+assert (H1 := proj2 Hl k H').
+omega.
+Qed.
+
+Definition canonic_exp x :=
+ fexp (ln_beta beta x).
+
+Definition canonic (f : float beta) :=
+ Fexp f = canonic_exp (F2R f).
+
+Definition scaled_mantissa x :=
+ (x * bpow (- canonic_exp x))%R.
+
+Definition generic_format (x : R) :=
+ x = F2R (Float beta (Ztrunc (scaled_mantissa x)) (canonic_exp x)).
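+
+(** In words: [x] is in the format when truncating its scaled mantissa and
+    multiplying back by [bpow (canonic_exp x)] recovers [x] exactly, i.e.,
+    when [x] is an integer multiple of [bpow (canonic_exp x)].  For
+    instance, with radix 2 and [fexp e := (e - 24)%Z], the real [3/2] has
+    [ln_beta] equal to 1, hence [canonic_exp (3/2) = -23]; its scaled
+    mantissa [3/2 * 2^23 = 12582912] is an integer, so [3/2] is in
+    [generic_format]. *)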
+
+(** Basic facts *)
+Theorem generic_format_0 :
+ generic_format 0.
+Proof.
+unfold generic_format, scaled_mantissa.
+rewrite Rmult_0_l.
+change (Ztrunc 0) with (Ztrunc (Z2R 0)).
+now rewrite Ztrunc_Z2R, F2R_0.
+Qed.
+
+Theorem canonic_exp_opp :
+ forall x,
+ canonic_exp (-x) = canonic_exp x.
+Proof.
+intros x.
+unfold canonic_exp.
+now rewrite ln_beta_opp.
+Qed.
+
+Theorem canonic_exp_abs :
+ forall x,
+ canonic_exp (Rabs x) = canonic_exp x.
+Proof.
+intros x.
+unfold canonic_exp.
+now rewrite ln_beta_abs.
+Qed.
+
+Theorem generic_format_bpow :
+ forall e, (fexp (e + 1) <= e)%Z ->
+ generic_format (bpow e).
+Proof.
+intros e H.
+unfold generic_format, scaled_mantissa, canonic_exp.
+rewrite ln_beta_bpow.
+rewrite <- bpow_plus.
+rewrite <- (Z2R_Zpower beta (e + - fexp (e + 1))).
+rewrite Ztrunc_Z2R.
+rewrite <- F2R_bpow.
+rewrite F2R_change_exp with (1 := H).
+now rewrite Zmult_1_l.
+now apply Zle_minus_le_0.
+Qed.
+
+Theorem generic_format_bpow' :
+ forall e, (fexp e <= e)%Z ->
+ generic_format (bpow e).
+Proof.
+intros e He.
+apply generic_format_bpow.
+destruct (Zle_lt_or_eq _ _ He).
+now apply valid_exp.
+rewrite <- H.
+apply valid_exp_.
+rewrite H.
+apply Zle_refl.
+Qed.
+
+Theorem generic_format_F2R :
+ forall m e,
+ ( m <> 0 -> canonic_exp (F2R (Float beta m e)) <= e )%Z ->
+ generic_format (F2R (Float beta m e)).
+Proof.
+intros m e.
+destruct (Z_eq_dec m 0) as [Zm|Zm].
+intros _.
+rewrite Zm, F2R_0.
+apply generic_format_0.
+unfold generic_format, scaled_mantissa.
+set (e' := canonic_exp (F2R (Float beta m e))).
+intros He.
+specialize (He Zm).
+unfold F2R at 3. simpl.
+rewrite F2R_change_exp with (1 := He).
+apply F2R_eq_compat.
+rewrite Rmult_assoc, <- bpow_plus, <- Z2R_Zpower, <- Z2R_mult.
+now rewrite Ztrunc_Z2R.
+now apply Zle_left.
+Qed.
+
+Theorem canonic_opp :
+ forall m e,
+ canonic (Float beta m e) ->
+ canonic (Float beta (-m) e).
+Proof.
+intros m e H.
+unfold canonic.
+now rewrite F2R_Zopp, canonic_exp_opp.
+Qed.
+
+Theorem canonic_unicity :
+ forall f1 f2,
+ canonic f1 ->
+ canonic f2 ->
+ F2R f1 = F2R f2 ->
+ f1 = f2.
+Proof.
+intros (m1, e1) (m2, e2).
+unfold canonic. simpl.
+intros H1 H2 H.
+rewrite H in H1.
+rewrite <- H2 in H1. clear H2.
+rewrite H1 in H |- *.
+apply (f_equal (fun m => Float beta m e2)).
+apply F2R_eq_reg with (1 := H).
+Qed.
+
+Theorem scaled_mantissa_generic :
+ forall x,
+ generic_format x ->
+ scaled_mantissa x = Z2R (Ztrunc (scaled_mantissa x)).
+Proof.
+intros x Hx.
+unfold scaled_mantissa.
+pattern x at 1 3 ; rewrite Hx.
+unfold F2R. simpl.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+now rewrite Ztrunc_Z2R.
+Qed.
+
+Theorem scaled_mantissa_mult_bpow :
+ forall x,
+ (scaled_mantissa x * bpow (canonic_exp x))%R = x.
+Proof.
+intros x.
+unfold scaled_mantissa.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_l.
+apply Rmult_1_r.
+Qed.
+
+Theorem scaled_mantissa_0 :
+ scaled_mantissa 0 = R0.
+Proof.
+apply Rmult_0_l.
+Qed.
+
+Theorem scaled_mantissa_opp :
+ forall x,
+ scaled_mantissa (-x) = (-scaled_mantissa x)%R.
+Proof.
+intros x.
+unfold scaled_mantissa.
+rewrite canonic_exp_opp.
+now rewrite Ropp_mult_distr_l_reverse.
+Qed.
+
+Theorem scaled_mantissa_abs :
+ forall x,
+ scaled_mantissa (Rabs x) = Rabs (scaled_mantissa x).
+Proof.
+intros x.
+unfold scaled_mantissa.
+rewrite canonic_exp_abs, Rabs_mult.
+apply f_equal.
+apply sym_eq.
+apply Rabs_pos_eq.
+apply bpow_ge_0.
+Qed.
+
+Theorem generic_format_opp :
+ forall x, generic_format x -> generic_format (-x).
+Proof.
+intros x Hx.
+unfold generic_format.
+rewrite scaled_mantissa_opp, canonic_exp_opp.
+rewrite Ztrunc_opp.
+rewrite F2R_Zopp.
+now apply f_equal.
+Qed.
+
+Theorem generic_format_abs :
+ forall x, generic_format x -> generic_format (Rabs x).
+Proof.
+intros x Hx.
+unfold generic_format.
+rewrite scaled_mantissa_abs, canonic_exp_abs.
+rewrite Ztrunc_abs.
+rewrite F2R_Zabs.
+now apply f_equal.
+Qed.
+
+Theorem generic_format_abs_inv :
+ forall x, generic_format (Rabs x) -> generic_format x.
+Proof.
+intros x.
+unfold generic_format, Rabs.
+case Rcase_abs ; intros _.
+rewrite scaled_mantissa_opp, canonic_exp_opp, Ztrunc_opp.
+intros H.
+rewrite <- (Ropp_involutive x) at 1.
+rewrite H, F2R_Zopp.
+apply Ropp_involutive.
+easy.
+Qed.
+
+Theorem canonic_exp_fexp :
+ forall x ex,
+ (bpow (ex - 1) <= Rabs x < bpow ex)%R ->
+ canonic_exp x = fexp ex.
+Proof.
+intros x ex Hx.
+unfold canonic_exp.
+now rewrite ln_beta_unique with (1 := Hx).
+Qed.
+
+Theorem canonic_exp_fexp_pos :
+ forall x ex,
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ canonic_exp x = fexp ex.
+Proof.
+intros x ex Hx.
+apply canonic_exp_fexp.
+rewrite Rabs_pos_eq.
+exact Hx.
+apply Rle_trans with (2 := proj1 Hx).
+apply bpow_ge_0.
+Qed.
+
+(** Properties when the real number is "small", i.e., lies in the subnormal range of the format *)
+Theorem mantissa_small_pos :
+ forall x ex,
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ (ex <= fexp ex)%Z ->
+ (0 < x * bpow (- fexp ex) < 1)%R.
+Proof.
+intros x ex Hx He.
+split.
+apply Rmult_lt_0_compat.
+apply Rlt_le_trans with (2 := proj1 Hx).
+apply bpow_gt_0.
+apply bpow_gt_0.
+apply Rmult_lt_reg_r with (bpow (fexp ex)).
+apply bpow_gt_0.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_l.
+rewrite Rmult_1_r, Rmult_1_l.
+apply Rlt_le_trans with (1 := proj2 Hx).
+now apply bpow_le.
+Qed.
+
+Theorem scaled_mantissa_small :
+ forall x ex,
+ (Rabs x < bpow ex)%R ->
+ (ex <= fexp ex)%Z ->
+ (Rabs (scaled_mantissa x) < 1)%R.
+Proof.
+intros x ex Ex He.
+destruct (Req_dec x 0) as [Zx|Zx].
+rewrite Zx, scaled_mantissa_0, Rabs_R0.
+now apply (Z2R_lt 0 1).
+rewrite <- scaled_mantissa_abs.
+unfold scaled_mantissa.
+rewrite canonic_exp_abs.
+unfold canonic_exp.
+destruct (ln_beta beta x) as (ex', Ex').
+simpl.
+specialize (Ex' Zx).
+apply (mantissa_small_pos _ _ Ex').
+assert (ex' <= fexp ex)%Z.
+apply Zle_trans with (2 := He).
+apply bpow_lt_bpow with beta.
+now apply Rle_lt_trans with (2 := Ex).
+now rewrite (proj2 (proj2 (valid_exp _) He)).
+Qed.
+
+Theorem abs_scaled_mantissa_lt_bpow :
+ forall x,
+ (Rabs (scaled_mantissa x) < bpow (ln_beta beta x - canonic_exp x))%R.
+Proof.
+intros x.
+destruct (Req_dec x 0) as [Zx|Zx].
+rewrite Zx, scaled_mantissa_0, Rabs_R0.
+apply bpow_gt_0.
+apply Rlt_le_trans with (1 := bpow_ln_beta_gt beta _).
+apply bpow_le.
+unfold scaled_mantissa.
+rewrite ln_beta_mult_bpow with (1 := Zx).
+apply Zle_refl.
+Qed.
+
+Theorem ln_beta_generic_gt :
+ forall x, (x <> 0)%R ->
+ generic_format x ->
+ (canonic_exp x < ln_beta beta x)%Z.
+Proof.
+intros x Zx Gx.
+apply Znot_ge_lt.
+unfold canonic_exp.
+destruct (ln_beta beta x) as (ex,Ex) ; simpl.
+specialize (Ex Zx).
+intros H.
+apply Zge_le in H.
+generalize (scaled_mantissa_small x ex (proj2 Ex) H).
+contradict Zx.
+rewrite Gx.
+replace (Ztrunc (scaled_mantissa x)) with Z0.
+apply F2R_0.
+cut (Zabs (Ztrunc (scaled_mantissa x)) < 1)%Z.
+clear ; zify ; omega.
+apply lt_Z2R.
+rewrite Z2R_abs.
+now rewrite <- scaled_mantissa_generic.
+Qed.
+
+Theorem mantissa_DN_small_pos :
+ forall x ex,
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ (ex <= fexp ex)%Z ->
+ Zfloor (x * bpow (- fexp ex)) = Z0.
+Proof.
+intros x ex Hx He.
+apply Zfloor_imp. simpl.
+assert (H := mantissa_small_pos x ex Hx He).
+split ; try apply Rlt_le ; apply H.
+Qed.
+
+Theorem mantissa_UP_small_pos :
+ forall x ex,
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ (ex <= fexp ex)%Z ->
+ Zceil (x * bpow (- fexp ex)) = 1%Z.
+Proof.
+intros x ex Hx He.
+apply Zceil_imp. simpl.
+assert (H := mantissa_small_pos x ex Hx He).
+split ; try apply Rlt_le ; apply H.
+Qed.
+
+(** Generic facts about any format *)
+Theorem generic_format_discrete :
+ forall x m,
+ let e := canonic_exp x in
+ (F2R (Float beta m e) < x < F2R (Float beta (m + 1) e))%R ->
+ ~ generic_format x.
+Proof.
+intros x m e (Hx,Hx2) Hf.
+apply Rlt_not_le with (1 := Hx2). clear Hx2.
+rewrite Hf.
+fold e.
+apply F2R_le_compat.
+apply Zlt_le_succ.
+apply lt_Z2R.
+rewrite <- scaled_mantissa_generic with (1 := Hf).
+apply Rmult_lt_reg_r with (bpow e).
+apply bpow_gt_0.
+now rewrite scaled_mantissa_mult_bpow.
+Qed.
+
+Theorem generic_format_canonic :
+ forall f, canonic f ->
+ generic_format (F2R f).
+Proof.
+intros (m, e) Hf.
+unfold canonic in Hf. simpl in Hf.
+unfold generic_format, scaled_mantissa.
+rewrite <- Hf.
+apply F2R_eq_compat.
+unfold F2R. simpl.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+now rewrite Ztrunc_Z2R.
+Qed.
+
+Theorem generic_format_ge_bpow :
+ forall emin,
+ ( forall e, (emin <= fexp e)%Z ) ->
+ forall x,
+ (0 < x)%R ->
+ generic_format x ->
+ (bpow emin <= x)%R.
+Proof.
+intros emin Emin x Hx Fx.
+rewrite Fx.
+apply Rle_trans with (bpow (fexp (ln_beta beta x))).
+now apply bpow_le.
+apply bpow_le_F2R.
+apply F2R_gt_0_reg with beta (canonic_exp x).
+now rewrite <- Fx.
+Qed.
+
+Theorem abs_lt_bpow_prec:
+ forall prec,
+ (forall e, (e - prec <= fexp e)%Z) ->
+ (* OK with FLX, FLT and FTZ *)
+ forall x,
+ (Rabs x < bpow (prec + canonic_exp x))%R.
+Proof.
+intros prec Hp x.
+case (Req_dec x 0); intros Hxz.
+rewrite Hxz, Rabs_R0.
+apply bpow_gt_0.
+unfold canonic_exp.
+destruct (ln_beta beta x) as (ex,Ex) ; simpl.
+specialize (Ex Hxz).
+apply Rlt_le_trans with (1 := proj2 Ex).
+apply bpow_le.
+specialize (Hp ex).
+omega.
+Qed.
+
+Theorem generic_format_bpow_inv' :
+ forall e,
+ generic_format (bpow e) ->
+ (fexp (e + 1) <= e)%Z.
+Proof.
+intros e He.
+apply Znot_gt_le.
+contradict He.
+unfold generic_format, scaled_mantissa, canonic_exp, F2R. simpl.
+rewrite ln_beta_bpow, <- bpow_plus.
+apply Rgt_not_eq.
+rewrite Ztrunc_floor.
+2: apply bpow_ge_0.
+rewrite Zfloor_imp with (n := Z0).
+rewrite Rmult_0_l.
+apply bpow_gt_0.
+split.
+apply bpow_ge_0.
+apply (bpow_lt _ _ 0).
+clear -He ; omega.
+Qed.
+
+Theorem generic_format_bpow_inv :
+ forall e,
+ generic_format (bpow e) ->
+ (fexp e <= e)%Z.
+Proof.
+intros e He.
+apply generic_format_bpow_inv' in He.
+assert (H := valid_exp_large' (e + 1) e).
+omega.
+Qed.
+
+Section Fcore_generic_round_pos.
+
+(** Rounding functions: R -> Z *)
+
+Variable rnd : R -> Z.
+
+Class Valid_rnd := {
+ Zrnd_le : forall x y, (x <= y)%R -> (rnd x <= rnd y)%Z ;
+ Zrnd_Z2R : forall n, rnd (Z2R n) = n
+}.
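+
+(** A valid integer rounding is monotone and leaves integers unchanged.
+    The floor, ceiling, truncation and away-from-zero functions [Zfloor],
+    [Zceil], [Ztrunc] and [Zaway] are all instances (declared below). *)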
+
+Context { valid_rnd : Valid_rnd }.
+
+Theorem Zrnd_DN_or_UP :
+ forall x, rnd x = Zfloor x \/ rnd x = Zceil x.
+Proof.
+intros x.
+destruct (Zle_or_lt (rnd x) (Zfloor x)) as [Hx|Hx].
+left.
+apply Zle_antisym with (1 := Hx).
+rewrite <- (Zrnd_Z2R (Zfloor x)).
+apply Zrnd_le.
+apply Zfloor_lb.
+right.
+apply Zle_antisym.
+rewrite <- (Zrnd_Z2R (Zceil x)).
+apply Zrnd_le.
+apply Zceil_ub.
+rewrite Zceil_floor_neq.
+omega.
+intros H.
+rewrite <- H in Hx.
+rewrite Zfloor_Z2R, Zrnd_Z2R in Hx.
+apply Zlt_irrefl with (1 := Hx).
+Qed.
+
+Theorem Zrnd_ZR_or_AW :
+ forall x, rnd x = Ztrunc x \/ rnd x = Zaway x.
+Proof.
+intros x.
+unfold Ztrunc, Zaway.
+destruct (Zrnd_DN_or_UP x) as [Hx|Hx] ;
+ case Rlt_bool.
+now right.
+now left.
+now left.
+now right.
+Qed.
+
+(** The most useful rounding operator: it maps a real number to a floating-point value in the format *)
+Definition round x :=
+ F2R (Float beta (rnd (scaled_mantissa x)) (canonic_exp x)).
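+
+(** Concretely, [round x] rounds the scaled mantissa of [x] to an integer
+    with [rnd] and scales it back.  For instance, with radix 2 and
+    [fexp e := (e - 24)%Z], rounding [1 + 2^(-25)] uses [canonic_exp = -23]
+    and scaled mantissa [2^23 + 1/4]; with [rnd := Zfloor] the result is
+    [1], while with [rnd := Zceil] it is [1 + 2^(-23)]. *)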
+
+Theorem round_le_pos :
+ forall x y, (0 < x)%R -> (x <= y)%R -> (round x <= round y)%R.
+Proof.
+intros x y Hx Hxy.
+unfold round, scaled_mantissa, canonic_exp.
+destruct (ln_beta beta x) as (ex, Hex). simpl.
+destruct (ln_beta beta y) as (ey, Hey). simpl.
+specialize (Hex (Rgt_not_eq _ _ Hx)).
+specialize (Hey (Rgt_not_eq _ _ (Rlt_le_trans _ _ _ Hx Hxy))).
+rewrite Rabs_pos_eq in Hex.
+2: now apply Rlt_le.
+rewrite Rabs_pos_eq in Hey.
+2: apply Rle_trans with (2:=Hxy); now apply Rlt_le.
+assert (He: (ex <= ey)%Z).
+cut (ex - 1 < ey)%Z. omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (1 := proj1 Hex).
+apply Rle_lt_trans with (1 := Hxy).
+apply Hey.
+destruct (Zle_or_lt ey (fexp ey)) as [Hy1|Hy1].
+rewrite (proj2 (proj2 (valid_exp ey) Hy1) ex).
+apply F2R_le_compat.
+apply Zrnd_le.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+exact Hxy.
+now apply Zle_trans with ey.
+destruct (Zle_lt_or_eq _ _ He) as [He'|He'].
+destruct (Zle_or_lt ey (fexp ex)) as [Hx2|Hx2].
+rewrite (proj2 (proj2 (valid_exp ex) (Zle_trans _ _ _ He Hx2)) ey Hx2).
+apply F2R_le_compat.
+apply Zrnd_le.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+exact Hxy.
+apply Rle_trans with (F2R (Float beta (rnd (bpow (ey - 1) * bpow (- fexp ey))) (fexp ey))).
+rewrite <- bpow_plus.
+rewrite <- (Z2R_Zpower beta (ey - 1 + -fexp ey)). 2: omega.
+rewrite Zrnd_Z2R.
+destruct (Zle_or_lt ex (fexp ex)) as [Hx1|Hx1].
+apply Rle_trans with (F2R (Float beta 1 (fexp ex))).
+apply F2R_le_compat.
+rewrite <- (Zrnd_Z2R 1).
+apply Zrnd_le.
+apply Rlt_le.
+exact (proj2 (mantissa_small_pos _ _ Hex Hx1)).
+unfold F2R. simpl.
+rewrite Z2R_Zpower. 2: omega.
+rewrite <- bpow_plus, Rmult_1_l.
+apply bpow_le.
+omega.
+apply Rle_trans with (F2R (Float beta (rnd (bpow ex * bpow (- fexp ex))) (fexp ex))).
+apply F2R_le_compat.
+apply Zrnd_le.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Rlt_le.
+apply Hex.
+rewrite <- bpow_plus.
+rewrite <- Z2R_Zpower. 2: omega.
+rewrite Zrnd_Z2R.
+unfold F2R. simpl.
+rewrite 2!Z2R_Zpower ; try omega.
+rewrite <- 2!bpow_plus.
+apply bpow_le.
+omega.
+apply F2R_le_compat.
+apply Zrnd_le.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Hey.
+rewrite He'.
+apply F2R_le_compat.
+apply Zrnd_le.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+exact Hxy.
+Qed.
+
+Theorem round_generic :
+ forall x,
+ generic_format x ->
+ round x = x.
+Proof.
+intros x Hx.
+unfold round.
+rewrite scaled_mantissa_generic with (1 := Hx).
+rewrite Zrnd_Z2R.
+now apply sym_eq.
+Qed.
+
+Theorem round_0 :
+ round 0 = R0.
+Proof.
+unfold round, scaled_mantissa.
+rewrite Rmult_0_l.
+fold (Z2R 0).
+rewrite Zrnd_Z2R.
+apply F2R_0.
+Qed.
+
+Theorem round_bounded_large_pos :
+ forall x ex,
+ (fexp ex < ex)%Z ->
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ (bpow (ex - 1) <= round x <= bpow ex)%R.
+Proof.
+intros x ex He Hx.
+unfold round, scaled_mantissa.
+rewrite (canonic_exp_fexp_pos _ _ Hx).
+unfold F2R. simpl.
+destruct (Zrnd_DN_or_UP (x * bpow (- fexp ex))) as [Hr|Hr] ; rewrite Hr.
+(* DN *)
+split.
+replace (ex - 1)%Z with (ex - 1 + - fexp ex + fexp ex)%Z by ring.
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+assert (Hf: Z2R (Zpower beta (ex - 1 - fexp ex)) = bpow (ex - 1 + - fexp ex)).
+apply Z2R_Zpower.
+omega.
+rewrite <- Hf.
+apply Z2R_le.
+apply Zfloor_lub.
+rewrite Hf.
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Hx.
+apply Rle_trans with (2 := Rlt_le _ _ (proj2 Hx)).
+apply Rmult_le_reg_r with (bpow (- fexp ex)).
+apply bpow_gt_0.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+apply Zfloor_lb.
+(* UP *)
+split.
+apply Rle_trans with (1 := proj1 Hx).
+apply Rmult_le_reg_r with (bpow (- fexp ex)).
+apply bpow_gt_0.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+apply Zceil_ub.
+pattern ex at 3 ; replace ex with (ex - fexp ex + fexp ex)%Z by ring.
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+assert (Hf: Z2R (Zpower beta (ex - fexp ex)) = bpow (ex - fexp ex)).
+apply Z2R_Zpower.
+omega.
+rewrite <- Hf.
+apply Z2R_le.
+apply Zceil_glb.
+rewrite Hf.
+unfold Zminus.
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Rlt_le.
+apply Hx.
+Qed.
+
+Theorem round_bounded_small_pos :
+ forall x ex,
+ (ex <= fexp ex)%Z ->
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ round x = R0 \/ round x = bpow (fexp ex).
+Proof.
+intros x ex He Hx.
+unfold round, scaled_mantissa.
+rewrite (canonic_exp_fexp_pos _ _ Hx).
+unfold F2R. simpl.
+destruct (Zrnd_DN_or_UP (x * bpow (-fexp ex))) as [Hr|Hr] ; rewrite Hr.
+(* DN *)
+left.
+apply Rmult_eq_0_compat_r.
+apply (@f_equal _ _ Z2R _ Z0).
+apply Zfloor_imp.
+refine (let H := _ in conj (Rlt_le _ _ (proj1 H)) (proj2 H)).
+now apply mantissa_small_pos.
+(* UP *)
+right.
+pattern (bpow (fexp ex)) at 2 ; rewrite <- Rmult_1_l.
+apply (f_equal (fun m => (m * bpow (fexp ex))%R)).
+apply (@f_equal _ _ Z2R _ 1%Z).
+apply Zceil_imp.
+refine (let H := _ in conj (proj1 H) (Rlt_le _ _ (proj2 H))).
+now apply mantissa_small_pos.
+Qed.
+
+Theorem generic_format_round_pos :
+ forall x,
+ (0 < x)%R ->
+ generic_format (round x).
+Proof.
+intros x Hx0.
+destruct (ln_beta beta x) as (ex, Hex).
+specialize (Hex (Rgt_not_eq _ _ Hx0)).
+rewrite Rabs_pos_eq in Hex. 2: now apply Rlt_le.
+destruct (Zle_or_lt ex (fexp ex)) as [He|He].
+(* small *)
+destruct (round_bounded_small_pos _ _ He Hex) as [Hr|Hr] ; rewrite Hr.
+apply generic_format_0.
+apply generic_format_bpow.
+now apply valid_exp.
+(* large *)
+generalize (round_bounded_large_pos _ _ He Hex).
+intros (Hr1, Hr2).
+destruct (Rle_or_lt (bpow ex) (round x)) as [Hr|Hr].
+rewrite <- (Rle_antisym _ _ Hr Hr2).
+apply generic_format_bpow.
+now apply valid_exp.
+assert (Hr' := conj Hr1 Hr).
+unfold generic_format, scaled_mantissa.
+rewrite (canonic_exp_fexp_pos _ _ Hr').
+unfold round, scaled_mantissa.
+rewrite (canonic_exp_fexp_pos _ _ Hex).
+unfold F2R at 3. simpl.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+now rewrite Ztrunc_Z2R.
+Qed.
+
+End Fcore_generic_round_pos.
+
+Theorem round_ext :
+ forall rnd1 rnd2,
+ ( forall x, rnd1 x = rnd2 x ) ->
+ forall x,
+ round rnd1 x = round rnd2 x.
+Proof.
+intros rnd1 rnd2 Hext x.
+unfold round.
+now rewrite Hext.
+Qed.
+
+Section Zround_opp.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Definition Zrnd_opp x := Zopp (rnd (-x)).
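+
+(** [Zrnd_opp] rounds [x] by negating it, rounding the opposite with [rnd],
+    and negating the result; for instance [Zrnd_opp Zfloor] coincides with
+    [Zceil]. *)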
+
+Global Instance valid_rnd_opp : Valid_rnd Zrnd_opp.
+Proof with auto with typeclass_instances.
+split.
+(* *)
+intros x y Hxy.
+unfold Zrnd_opp.
+apply Zopp_le_cancel.
+rewrite 2!Zopp_involutive.
+apply Zrnd_le...
+now apply Ropp_le_contravar.
+(* *)
+intros n.
+unfold Zrnd_opp.
+rewrite <- Z2R_opp, Zrnd_Z2R...
+apply Zopp_involutive.
+Qed.
+
+Theorem round_opp :
+ forall x,
+ round rnd (- x) = Ropp (round Zrnd_opp x).
+Proof.
+intros x.
+unfold round.
+rewrite <- F2R_Zopp, canonic_exp_opp, scaled_mantissa_opp.
+apply F2R_eq_compat.
+apply sym_eq.
+exact (Zopp_involutive _).
+Qed.
+
+End Zround_opp.
+
+(** The IEEE-754 directed roundings (up, down, and toward zero), plus rounding away from zero *)
+
+Global Instance valid_rnd_DN : Valid_rnd Zfloor.
+Proof.
+split.
+apply Zfloor_le.
+apply Zfloor_Z2R.
+Qed.
+
+Global Instance valid_rnd_UP : Valid_rnd Zceil.
+Proof.
+split.
+apply Zceil_le.
+apply Zceil_Z2R.
+Qed.
+
+Global Instance valid_rnd_ZR : Valid_rnd Ztrunc.
+Proof.
+split.
+apply Ztrunc_le.
+apply Ztrunc_Z2R.
+Qed.
+
+Global Instance valid_rnd_AW : Valid_rnd Zaway.
+Proof.
+split.
+apply Zaway_le.
+apply Zaway_Z2R.
+Qed.
+
+Section monotone.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem round_DN_or_UP :
+ forall x,
+ round rnd x = round Zfloor x \/ round rnd x = round Zceil x.
+Proof.
+intros x.
+unfold round.
+destruct (Zrnd_DN_or_UP rnd (scaled_mantissa x)) as [Hx|Hx].
+left. now rewrite Hx.
+right. now rewrite Hx.
+Qed.
+
+Theorem round_ZR_or_AW :
+ forall x,
+ round rnd x = round Ztrunc x \/ round rnd x = round Zaway x.
+Proof.
+intros x.
+unfold round.
+destruct (Zrnd_ZR_or_AW rnd (scaled_mantissa x)) as [Hx|Hx].
+left. now rewrite Hx.
+right. now rewrite Hx.
+Qed.
+
+Theorem round_le :
+ forall x y, (x <= y)%R -> (round rnd x <= round rnd y)%R.
+Proof with auto with typeclass_instances.
+intros x y Hxy.
+destruct (total_order_T x 0) as [[Hx|Hx]|Hx].
+3: now apply round_le_pos.
+(* x < 0 *)
+unfold round.
+destruct (Rlt_or_le y 0) as [Hy|Hy].
+(* . y < 0 *)
+rewrite <- (Ropp_involutive x), <- (Ropp_involutive y).
+rewrite (scaled_mantissa_opp (-x)), (scaled_mantissa_opp (-y)).
+rewrite (canonic_exp_opp (-x)), (canonic_exp_opp (-y)).
+apply Ropp_le_cancel.
+rewrite <- 2!F2R_Zopp.
+apply (round_le_pos (Zrnd_opp rnd) (-y) (-x)).
+rewrite <- Ropp_0.
+now apply Ropp_lt_contravar.
+now apply Ropp_le_contravar.
+(* . 0 <= y *)
+apply Rle_trans with R0.
+apply F2R_le_0_compat. simpl.
+rewrite <- (Zrnd_Z2R rnd 0).
+apply Zrnd_le...
+simpl.
+rewrite <- (Rmult_0_l (bpow (- fexp (ln_beta beta x)))).
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+now apply Rlt_le.
+apply F2R_ge_0_compat. simpl.
+rewrite <- (Zrnd_Z2R rnd 0).
+apply Zrnd_le...
+apply Rmult_le_pos.
+exact Hy.
+apply bpow_ge_0.
+(* x = 0 *)
+rewrite Hx.
+rewrite round_0...
+apply F2R_ge_0_compat.
+simpl.
+rewrite <- (Zrnd_Z2R rnd 0).
+apply Zrnd_le...
+apply Rmult_le_pos.
+now rewrite <- Hx.
+apply bpow_ge_0.
+Qed.
+
+Theorem round_ge_generic :
+ forall x y, generic_format x -> (x <= y)%R -> (x <= round rnd y)%R.
+Proof.
+intros x y Hx Hxy.
+rewrite <- (round_generic rnd x Hx).
+now apply round_le.
+Qed.
+
+Theorem round_le_generic :
+ forall x y, generic_format y -> (x <= y)%R -> (round rnd x <= y)%R.
+Proof.
+intros x y Hy Hxy.
+rewrite <- (round_generic rnd y Hy).
+now apply round_le.
+Qed.
+
+End monotone.
+
+Theorem round_abs_abs' :
+ forall P : R -> R -> Prop,
+ ( forall rnd (Hr : Valid_rnd rnd) x, (0 <= x)%R -> P x (round rnd x) ) ->
+ forall rnd {Hr : Valid_rnd rnd} x, P (Rabs x) (Rabs (round rnd x)).
+Proof with auto with typeclass_instances.
+intros P HP rnd Hr x.
+destruct (Rle_or_lt 0 x) as [Hx|Hx].
+(* . *)
+rewrite 2!Rabs_pos_eq.
+now apply HP.
+rewrite <- (round_0 rnd).
+now apply round_le.
+exact Hx.
+(* . *)
+rewrite (Rabs_left _ Hx).
+rewrite Rabs_left1.
+pattern x at 2 ; rewrite <- Ropp_involutive.
+rewrite round_opp.
+rewrite Ropp_involutive.
+apply HP...
+rewrite <- Ropp_0.
+apply Ropp_le_contravar.
+now apply Rlt_le.
+rewrite <- (round_0 rnd).
+apply round_le...
+now apply Rlt_le.
+Qed.
+
+(* TODO: remove *)
+Theorem round_abs_abs :
+ forall P : R -> R -> Prop,
+ ( forall rnd (Hr : Valid_rnd rnd) x, P x (round rnd x) ) ->
+ forall rnd {Hr : Valid_rnd rnd} x, P (Rabs x) (Rabs (round rnd x)).
+Proof.
+intros P HP.
+apply round_abs_abs'.
+intros.
+now apply HP.
+Qed.
+
+Theorem round_bounded_large :
+ forall rnd {Hr : Valid_rnd rnd} x ex,
+ (fexp ex < ex)%Z ->
+ (bpow (ex - 1) <= Rabs x < bpow ex)%R ->
+ (bpow (ex - 1) <= Rabs (round rnd x) <= bpow ex)%R.
+Proof with auto with typeclass_instances.
+intros rnd Hr x ex He.
+apply round_abs_abs...
+clear rnd Hr x.
+intros rnd' Hr x.
+apply round_bounded_large_pos...
+Qed.
+
+Section monotone_abs.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem abs_round_ge_generic :
+ forall x y, generic_format x -> (x <= Rabs y)%R -> (x <= Rabs (round rnd y))%R.
+Proof with auto with typeclass_instances.
+intros x y.
+apply round_abs_abs...
+clear rnd valid_rnd y.
+intros rnd' Hrnd y Hy.
+apply round_ge_generic...
+Qed.
+
+Theorem abs_round_le_generic :
+ forall x y, generic_format y -> (Rabs x <= y)%R -> (Rabs (round rnd x) <= y)%R.
+Proof with auto with typeclass_instances.
+intros x y.
+apply round_abs_abs...
+clear rnd valid_rnd x.
+intros rnd' Hrnd x Hx.
+apply round_le_generic...
+Qed.
+
+End monotone_abs.
+
+Theorem round_DN_opp :
+ forall x,
+ round Zfloor (-x) = (- round Zceil x)%R.
+Proof.
+intros x.
+unfold round.
+rewrite scaled_mantissa_opp.
+rewrite <- F2R_Zopp.
+unfold Zceil.
+rewrite Zopp_involutive.
+now rewrite canonic_exp_opp.
+Qed.
+
+Theorem round_UP_opp :
+ forall x,
+ round Zceil (-x) = (- round Zfloor x)%R.
+Proof.
+intros x.
+unfold round.
+rewrite scaled_mantissa_opp.
+rewrite <- F2R_Zopp.
+unfold Zceil.
+rewrite Ropp_involutive.
+now rewrite canonic_exp_opp.
+Qed.
+
+Theorem round_ZR_opp :
+ forall x,
+ round Ztrunc (- x) = Ropp (round Ztrunc x).
+Proof.
+intros x.
+unfold round.
+rewrite scaled_mantissa_opp, canonic_exp_opp, Ztrunc_opp.
+apply F2R_Zopp.
+Qed.
+
+Theorem round_ZR_abs :
+ forall x,
+ round Ztrunc (Rabs x) = Rabs (round Ztrunc x).
+Proof with auto with typeclass_instances.
+intros x.
+apply sym_eq.
+unfold Rabs at 2.
+destruct (Rcase_abs x) as [Hx|Hx].
+rewrite round_ZR_opp.
+apply Rabs_left1.
+rewrite <- (round_0 Ztrunc).
+apply round_le...
+now apply Rlt_le.
+apply Rabs_pos_eq.
+rewrite <- (round_0 Ztrunc).
+apply round_le...
+now apply Rge_le.
+Qed.
+
+Theorem round_AW_opp :
+ forall x,
+ round Zaway (- x) = Ropp (round Zaway x).
+Proof.
+intros x.
+unfold round.
+rewrite scaled_mantissa_opp, canonic_exp_opp, Zaway_opp.
+apply F2R_Zopp.
+Qed.
+
+Theorem round_AW_abs :
+ forall x,
+ round Zaway (Rabs x) = Rabs (round Zaway x).
+Proof with auto with typeclass_instances.
+intros x.
+apply sym_eq.
+unfold Rabs at 2.
+destruct (Rcase_abs x) as [Hx|Hx].
+rewrite round_AW_opp.
+apply Rabs_left1.
+rewrite <- (round_0 Zaway).
+apply round_le...
+now apply Rlt_le.
+apply Rabs_pos_eq.
+rewrite <- (round_0 Zaway).
+apply round_le...
+now apply Rge_le.
+Qed.
+
+Theorem round_ZR_pos :
+ forall x,
+ (0 <= x)%R ->
+ round Ztrunc x = round Zfloor x.
+Proof.
+intros x Hx.
+unfold round, Ztrunc.
+case Rlt_bool_spec.
+intros H.
+elim Rlt_not_le with (1 := H).
+rewrite <- (Rmult_0_l (bpow (- canonic_exp x))).
+apply Rmult_le_compat_r with (2 := Hx).
+apply bpow_ge_0.
+easy.
+Qed.
+
+Theorem round_ZR_neg :
+ forall x,
+ (x <= 0)%R ->
+ round Ztrunc x = round Zceil x.
+Proof.
+intros x Hx.
+unfold round, Ztrunc.
+case Rlt_bool_spec.
+easy.
+intros [H|H].
+elim Rlt_not_le with (1 := H).
+rewrite <- (Rmult_0_l (bpow (- canonic_exp x))).
+apply Rmult_le_compat_r with (2 := Hx).
+apply bpow_ge_0.
+rewrite <- H.
+change R0 with (Z2R 0).
+now rewrite Zfloor_Z2R, Zceil_Z2R.
+Qed.
+
+Theorem round_AW_pos :
+ forall x,
+ (0 <= x)%R ->
+ round Zaway x = round Zceil x.
+Proof.
+intros x Hx.
+unfold round, Zaway.
+case Rlt_bool_spec.
+intros H.
+elim Rlt_not_le with (1 := H).
+rewrite <- (Rmult_0_l (bpow (- canonic_exp x))).
+apply Rmult_le_compat_r with (2 := Hx).
+apply bpow_ge_0.
+easy.
+Qed.
+
+Theorem round_AW_neg :
+ forall x,
+ (x <= 0)%R ->
+ round Zaway x = round Zfloor x.
+Proof.
+intros x Hx.
+unfold round, Zaway.
+case Rlt_bool_spec.
+easy.
+intros [H|H].
+elim Rlt_not_le with (1 := H).
+rewrite <- (Rmult_0_l (bpow (- canonic_exp x))).
+apply Rmult_le_compat_r with (2 := Hx).
+apply bpow_ge_0.
+rewrite <- H.
+change R0 with (Z2R 0).
+now rewrite Zfloor_Z2R, Zceil_Z2R.
+Qed.
+
+Theorem generic_format_round :
+ forall rnd { Hr : Valid_rnd rnd } x,
+ generic_format (round rnd x).
+Proof with auto with typeclass_instances.
+intros rnd Zrnd x.
+destruct (total_order_T x 0) as [[Hx|Hx]|Hx].
+rewrite <- (Ropp_involutive x).
+destruct (round_DN_or_UP rnd (- - x)) as [Hr|Hr] ; rewrite Hr.
+rewrite round_DN_opp.
+apply generic_format_opp.
+apply generic_format_round_pos...
+now apply Ropp_0_gt_lt_contravar.
+rewrite round_UP_opp.
+apply generic_format_opp.
+apply generic_format_round_pos...
+now apply Ropp_0_gt_lt_contravar.
+rewrite Hx.
+rewrite round_0...
+apply generic_format_0.
+now apply generic_format_round_pos.
+Qed.
+
+Theorem round_DN_pt :
+ forall x,
+ Rnd_DN_pt generic_format x (round Zfloor x).
+Proof with auto with typeclass_instances.
+intros x.
+split.
+apply generic_format_round...
+split.
+pattern x at 2 ; rewrite <- scaled_mantissa_mult_bpow.
+unfold round, F2R. simpl.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+apply Zfloor_lb.
+intros g Hg Hgx.
+apply round_ge_generic...
+Qed.
+
+Theorem generic_format_satisfies_any :
+ satisfies_any generic_format.
+Proof.
+split.
+(* symmetric set *)
+exact generic_format_0.
+exact generic_format_opp.
+(* round down *)
+intros x.
+eexists.
+apply round_DN_pt.
+Qed.
+
+Theorem round_UP_pt :
+ forall x,
+ Rnd_UP_pt generic_format x (round Zceil x).
+Proof.
+intros x.
+rewrite <- (Ropp_involutive x).
+rewrite round_UP_opp.
+apply Rnd_DN_UP_pt_sym.
+apply generic_format_opp.
+apply round_DN_pt.
+Qed.
+
+Theorem round_ZR_pt :
+ forall x,
+ Rnd_ZR_pt generic_format x (round Ztrunc x).
+Proof.
+intros x.
+split ; intros Hx.
+rewrite round_ZR_pos with (1 := Hx).
+apply round_DN_pt.
+rewrite round_ZR_neg with (1 := Hx).
+apply round_UP_pt.
+Qed.
+
+Theorem round_DN_small_pos :
+ forall x ex,
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ (ex <= fexp ex)%Z ->
+ round Zfloor x = R0.
+Proof.
+intros x ex Hx He.
+rewrite <- (F2R_0 beta (canonic_exp x)).
+rewrite <- mantissa_DN_small_pos with (1 := Hx) (2 := He).
+now rewrite <- canonic_exp_fexp_pos with (1 := Hx).
+Qed.
+
+Theorem round_UP_small_pos :
+ forall x ex,
+ (bpow (ex - 1) <= x < bpow ex)%R ->
+ (ex <= fexp ex)%Z ->
+ round Zceil x = (bpow (fexp ex)).
+Proof.
+intros x ex Hx He.
+rewrite <- F2R_bpow.
+rewrite <- mantissa_UP_small_pos with (1 := Hx) (2 := He).
+now rewrite <- canonic_exp_fexp_pos with (1 := Hx).
+Qed.
+
+Theorem generic_format_EM :
+ forall x,
+ generic_format x \/ ~generic_format x.
+Proof with auto with typeclass_instances.
+intros x.
+destruct (Req_dec (round Zfloor x) x) as [Hx|Hx].
+left.
+rewrite <- Hx.
+apply generic_format_round...
+right.
+intros H.
+apply Hx.
+apply round_generic...
+Qed.
+
+Section round_large.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem round_large_pos_ge_pow :
+ forall x e,
+ (0 < round rnd x)%R ->
+ (bpow e <= x)%R ->
+ (bpow e <= round rnd x)%R.
+Proof.
+intros x e Hd Hex.
+destruct (ln_beta beta x) as (ex, He).
+assert (Hx: (0 < x)%R).
+apply Rlt_le_trans with (2 := Hex).
+apply bpow_gt_0.
+specialize (He (Rgt_not_eq _ _ Hx)).
+rewrite Rabs_pos_eq in He. 2: now apply Rlt_le.
+apply Rle_trans with (bpow (ex - 1)).
+apply bpow_le.
+cut (e < ex)%Z. omega.
+apply (lt_bpow beta).
+now apply Rle_lt_trans with (2 := proj2 He).
+destruct (Zle_or_lt ex (fexp ex)).
+destruct (round_bounded_small_pos rnd x ex H He) as [Hr|Hr].
+rewrite Hr in Hd.
+elim Rlt_irrefl with (1 := Hd).
+rewrite Hr.
+apply bpow_le.
+omega.
+apply (round_bounded_large_pos rnd x ex H He).
+Qed.
+
+End round_large.
+
+Theorem ln_beta_round_ZR :
+ forall x,
+ (round Ztrunc x <> 0)%R ->
+ (ln_beta beta (round Ztrunc x) = ln_beta beta x :> Z).
+Proof with auto with typeclass_instances.
+intros x Zr.
+destruct (Req_dec x 0) as [Zx|Zx].
+rewrite Zx, round_0...
+apply ln_beta_unique.
+destruct (ln_beta beta x) as (ex, Ex) ; simpl.
+specialize (Ex Zx).
+rewrite <- round_ZR_abs.
+split.
+apply round_large_pos_ge_pow...
+rewrite round_ZR_abs.
+now apply Rabs_pos_lt.
+apply Ex.
+apply Rle_lt_trans with (2 := proj2 Ex).
+rewrite round_ZR_pos.
+apply round_DN_pt.
+apply Rabs_pos.
+Qed.
+
+Theorem ln_beta_round :
+ forall rnd {Hrnd : Valid_rnd rnd} x,
+ (round rnd x <> 0)%R ->
+ (ln_beta beta (round rnd x) = ln_beta beta x :> Z) \/
+ Rabs (round rnd x) = bpow (Zmax (ln_beta beta x) (fexp (ln_beta beta x))).
+Proof with auto with typeclass_instances.
+intros rnd Hrnd x.
+destruct (round_ZR_or_AW rnd x) as [Hr|Hr] ; rewrite Hr ; clear Hr rnd Hrnd.
+left.
+now apply ln_beta_round_ZR.
+intros Zr.
+destruct (Req_dec x 0) as [Zx|Zx].
+rewrite Zx, round_0...
+destruct (ln_beta beta x) as (ex, Ex) ; simpl.
+specialize (Ex Zx).
+rewrite <- ln_beta_abs.
+rewrite <- round_AW_abs.
+destruct (Zle_or_lt ex (fexp ex)) as [He|He].
+right.
+rewrite Zmax_r with (1 := He).
+rewrite round_AW_pos with (1 := Rabs_pos _).
+now apply round_UP_small_pos.
+destruct (round_bounded_large_pos Zaway _ ex He Ex) as (H1,[H2|H2]).
+left.
+apply ln_beta_unique.
+rewrite <- round_AW_abs, Rabs_Rabsolu.
+now split.
+right.
+now rewrite Zmax_l with (1 := Zlt_le_weak _ _ He).
+Qed.
+
+Theorem ln_beta_round_DN :
+ forall x,
+ (0 < round Zfloor x)%R ->
+ (ln_beta beta (round Zfloor x) = ln_beta beta x :> Z).
+Proof.
+intros x Hd.
+assert (0 < x)%R.
+apply Rlt_le_trans with (1 := Hd).
+apply round_DN_pt.
+revert Hd.
+rewrite <- round_ZR_pos.
+intros Hd.
+apply ln_beta_round_ZR.
+now apply Rgt_not_eq.
+now apply Rlt_le.
+Qed.
+
+(* TODO: remove *)
+Theorem canonic_exp_DN :
+ forall x,
+ (0 < round Zfloor x)%R ->
+ canonic_exp (round Zfloor x) = canonic_exp x.
+Proof.
+intros x Hd.
+apply (f_equal fexp).
+now apply ln_beta_round_DN.
+Qed.
+
+Theorem scaled_mantissa_DN :
+ forall x,
+ (0 < round Zfloor x)%R ->
+ scaled_mantissa (round Zfloor x) = Z2R (Zfloor (scaled_mantissa x)).
+Proof.
+intros x Hd.
+unfold scaled_mantissa.
+rewrite canonic_exp_DN with (1 := Hd).
+unfold round, F2R. simpl.
+now rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+Qed.
+
+Theorem generic_N_pt_DN_or_UP :
+ forall x f,
+ Rnd_N_pt generic_format x f ->
+ f = round Zfloor x \/ f = round Zceil x.
+Proof.
+intros x f Hxf.
+destruct (Rnd_N_pt_DN_or_UP _ _ _ Hxf).
+left.
+apply Rnd_DN_pt_unicity with (1 := H).
+apply round_DN_pt.
+right.
+apply Rnd_UP_pt_unicity with (1 := H).
+apply round_UP_pt.
+Qed.
+
+Section not_FTZ.
+
+Class Exp_not_FTZ :=
+ exp_not_FTZ : forall e, (fexp (fexp e + 1) <= fexp e)%Z.
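+
+(** This condition fails for abrupt-underflow (FTZ-like) exponent
+    functions, hence the name; it holds in particular whenever [fexp] is
+    monotone (see [monotone_exp_not_FTZ] below). *)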
+
+Context { exp_not_FTZ_ : Exp_not_FTZ }.
+
+Theorem subnormal_exponent :
+ forall e x,
+ (e <= fexp e)%Z ->
+ generic_format x ->
+ x = F2R (Float beta (Ztrunc (x * bpow (- fexp e))) (fexp e)).
+Proof.
+intros e x He Hx.
+pattern x at 2 ; rewrite Hx.
+unfold F2R at 2. simpl.
+rewrite Rmult_assoc, <- bpow_plus.
+assert (H: Z2R (Zpower beta (canonic_exp x + - fexp e)) = bpow (canonic_exp x + - fexp e)).
+apply Z2R_Zpower.
+unfold canonic_exp.
+set (ex := ln_beta beta x).
+generalize (exp_not_FTZ ex).
+generalize (proj2 (proj2 (valid_exp _) He) (fexp ex + 1)%Z).
+omega.
+rewrite <- H.
+rewrite <- Z2R_mult, Ztrunc_Z2R.
+unfold F2R. simpl.
+rewrite Z2R_mult.
+rewrite H.
+rewrite Rmult_assoc, <- bpow_plus.
+now ring_simplify (canonic_exp x + - fexp e + fexp e)%Z.
+Qed.
+
+End not_FTZ.
+
+Section monotone_exp.
+
+Class Monotone_exp :=
+ monotone_exp : forall ex ey, (ex <= ey)%Z -> (fexp ex <= fexp ey)%Z.
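+
+(** Monotonicity of the exponent function holds for the usual formats,
+    e.g. for FLX- and FLT-style exponent functions, and it yields the
+    convenient bounds on [canonic_exp] proved below. *)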
+
+Context { monotone_exp_ : Monotone_exp }.
+
+Global Instance monotone_exp_not_FTZ : Exp_not_FTZ.
+Proof.
+intros e.
+destruct (Z_lt_le_dec (fexp e) e) as [He|He].
+apply monotone_exp.
+now apply Zlt_le_succ.
+now apply valid_exp.
+Qed.
+
+Lemma canonic_exp_le_bpow :
+ forall (x : R) (e : Z),
+ x <> R0 ->
+ (Rabs x < bpow e)%R ->
+ (canonic_exp x <= fexp e)%Z.
+Proof.
+intros x e Zx Hx.
+apply monotone_exp.
+now apply ln_beta_le_bpow.
+Qed.
+
+Lemma canonic_exp_ge_bpow :
+ forall (x : R) (e : Z),
+ (bpow (e - 1) <= Rabs x)%R ->
+ (fexp e <= canonic_exp x)%Z.
+Proof.
+intros x e Hx.
+apply monotone_exp.
+rewrite (Zsucc_pred e).
+apply Zlt_le_succ.
+now apply ln_beta_gt_bpow.
+Qed.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem ln_beta_round_ge :
+ forall x,
+ round rnd x <> R0 ->
+ (ln_beta beta x <= ln_beta beta (round rnd x))%Z.
+Proof with auto with typeclass_instances.
+intros x.
+destruct (round_ZR_or_AW rnd x) as [H|H] ; rewrite H ; clear H ; intros Zr.
+rewrite ln_beta_round_ZR with (1 := Zr).
+apply Zle_refl.
+apply ln_beta_le_abs.
+contradict Zr.
+rewrite Zr.
+apply round_0...
+rewrite <- round_AW_abs.
+rewrite round_AW_pos.
+apply round_UP_pt.
+apply Rabs_pos.
+Qed.
+
+Theorem canonic_exp_round_ge :
+ forall x,
+ round rnd x <> R0 ->
+ (canonic_exp x <= canonic_exp (round rnd x))%Z.
+Proof with auto with typeclass_instances.
+intros x Zr.
+unfold canonic_exp.
+apply monotone_exp.
+now apply ln_beta_round_ge.
+Qed.
+
+End monotone_exp.
+
+Section Znearest.
+
+(** Rounding to nearest: when the value lies exactly halfway between two integers, the [choice] function breaks the tie *)
+Variable choice : Z -> bool.
+
+Definition Znearest x :=
+ match Rcompare (x - Z2R (Zfloor x)) (/2) with
+ | Lt => Zfloor x
+ | Eq => if choice (Zfloor x) then Zceil x else Zfloor x
+ | Gt => Zceil x
+ end.
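+
+(** For instance, [Znearest choice (5/2)] compares the fractional part
+    [1/2] with [1/2]: the comparison is an equality, so the result is
+    [3%Z] if [choice 2%Z] is [true] and [2%Z] otherwise; a value such as
+    [9/4], whose fractional part is below [1/2], is always rounded down
+    to [2%Z]. *)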
+
+Theorem Znearest_DN_or_UP :
+ forall x,
+ Znearest x = Zfloor x \/ Znearest x = Zceil x.
+Proof.
+intros x.
+unfold Znearest.
+case Rcompare_spec ; intros _.
+now left.
+case choice.
+now right.
+now left.
+now right.
+Qed.
+
+Theorem Znearest_ge_floor :
+ forall x,
+ (Zfloor x <= Znearest x)%Z.
+Proof.
+intros x.
+destruct (Znearest_DN_or_UP x) as [Hx|Hx] ; rewrite Hx.
+apply Zle_refl.
+apply le_Z2R.
+apply Rle_trans with x.
+apply Zfloor_lb.
+apply Zceil_ub.
+Qed.
+
+Theorem Znearest_le_ceil :
+ forall x,
+ (Znearest x <= Zceil x)%Z.
+Proof.
+intros x.
+destruct (Znearest_DN_or_UP x) as [Hx|Hx] ; rewrite Hx.
+apply le_Z2R.
+apply Rle_trans with x.
+apply Zfloor_lb.
+apply Zceil_ub.
+apply Zle_refl.
+Qed.
+
+Global Instance valid_rnd_N : Valid_rnd Znearest.
+Proof.
+split.
+(* *)
+intros x y Hxy.
+destruct (Rle_or_lt (Z2R (Zceil x)) y) as [H|H].
+apply Zle_trans with (1 := Znearest_le_ceil x).
+apply Zle_trans with (2 := Znearest_ge_floor y).
+now apply Zfloor_lub.
+(* . *)
+assert (Hf: Zfloor y = Zfloor x).
+apply Zfloor_imp.
+split.
+apply Rle_trans with (2 := Zfloor_lb y).
+apply Z2R_le.
+now apply Zfloor_le.
+apply Rlt_le_trans with (1 := H).
+apply Z2R_le.
+apply Zceil_glb.
+apply Rlt_le.
+rewrite Z2R_plus.
+apply Zfloor_ub.
+(* . *)
+unfold Znearest at 1.
+case Rcompare_spec ; intro Hx.
+(* .. *)
+rewrite <- Hf.
+apply Znearest_ge_floor.
+(* .. *)
+unfold Znearest.
+rewrite Hf.
+case Rcompare_spec ; intro Hy.
+elim Rlt_not_le with (1 := Hy).
+rewrite <- Hx.
+now apply Rplus_le_compat_r.
+replace y with x.
+apply Zle_refl.
+apply Rplus_eq_reg_l with (- Z2R (Zfloor x))%R.
+rewrite 2!(Rplus_comm (- (Z2R (Zfloor x)))).
+change (x - Z2R (Zfloor x) = y - Z2R (Zfloor x))%R.
+now rewrite Hy.
+apply Zle_trans with (Zceil x).
+case choice.
+apply Zle_refl.
+apply le_Z2R.
+apply Rle_trans with x.
+apply Zfloor_lb.
+apply Zceil_ub.
+now apply Zceil_le.
+(* .. *)
+unfold Znearest.
+rewrite Hf.
+rewrite Rcompare_Gt.
+now apply Zceil_le.
+apply Rlt_le_trans with (1 := Hx).
+now apply Rplus_le_compat_r.
+(* *)
+intros n.
+unfold Znearest.
+rewrite Zfloor_Z2R.
+rewrite Rcompare_Lt.
+easy.
+unfold Rminus.
+rewrite Rplus_opp_r.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+Qed.
+
+Theorem Rcompare_floor_ceil_mid :
+ forall x,
+ Z2R (Zfloor x) <> x ->
+ Rcompare (x - Z2R (Zfloor x)) (/ 2) = Rcompare (x - Z2R (Zfloor x)) (Z2R (Zceil x) - x).
+Proof.
+intros x Hx.
+rewrite Zceil_floor_neq with (1 := Hx).
+rewrite Z2R_plus. simpl.
+destruct (Rcompare_spec (x - Z2R (Zfloor x)) (/ 2)) as [H1|H1|H1] ; apply sym_eq.
+(* . *)
+apply Rcompare_Lt.
+apply Rplus_lt_reg_r with (x - Z2R (Zfloor x))%R.
+replace (x - Z2R (Zfloor x) + (x - Z2R (Zfloor x)))%R with ((x - Z2R (Zfloor x)) * 2)%R by ring.
+replace (x - Z2R (Zfloor x) + (Z2R (Zfloor x) + 1 - x))%R with (/2 * 2)%R by field.
+apply Rmult_lt_compat_r with (2 := H1).
+now apply (Z2R_lt 0 2).
+(* . *)
+apply Rcompare_Eq.
+replace (Z2R (Zfloor x) + 1 - x)%R with (1 - (x - Z2R (Zfloor x)))%R by ring.
+rewrite H1.
+field.
+(* . *)
+apply Rcompare_Gt.
+apply Rplus_lt_reg_r with (x - Z2R (Zfloor x))%R.
+replace (x - Z2R (Zfloor x) + (x - Z2R (Zfloor x)))%R with ((x - Z2R (Zfloor x)) * 2)%R by ring.
+replace (x - Z2R (Zfloor x) + (Z2R (Zfloor x) + 1 - x))%R with (/2 * 2)%R by field.
+apply Rmult_lt_compat_r with (2 := H1).
+now apply (Z2R_lt 0 2).
+Qed.
+
+Theorem Rcompare_ceil_floor_mid :
+ forall x,
+ Z2R (Zfloor x) <> x ->
+ Rcompare (Z2R (Zceil x) - x) (/ 2) = Rcompare (Z2R (Zceil x) - x) (x - Z2R (Zfloor x)).
+Proof.
+intros x Hx.
+rewrite Zceil_floor_neq with (1 := Hx).
+rewrite Z2R_plus. simpl.
+destruct (Rcompare_spec (Z2R (Zfloor x) + 1 - x) (/ 2)) as [H1|H1|H1] ; apply sym_eq.
+(* . *)
+apply Rcompare_Lt.
+apply Rplus_lt_reg_r with (Z2R (Zfloor x) + 1 - x)%R.
+replace (Z2R (Zfloor x) + 1 - x + (Z2R (Zfloor x) + 1 - x))%R with ((Z2R (Zfloor x) + 1 - x) * 2)%R by ring.
+replace (Z2R (Zfloor x) + 1 - x + (x - Z2R (Zfloor x)))%R with (/2 * 2)%R by field.
+apply Rmult_lt_compat_r with (2 := H1).
+now apply (Z2R_lt 0 2).
+(* . *)
+apply Rcompare_Eq.
+replace (x - Z2R (Zfloor x))%R with (1 - (Z2R (Zfloor x) + 1 - x))%R by ring.
+rewrite H1.
+field.
+(* . *)
+apply Rcompare_Gt.
+apply Rplus_lt_reg_r with (Z2R (Zfloor x) + 1 - x)%R.
+replace (Z2R (Zfloor x) + 1 - x + (Z2R (Zfloor x) + 1 - x))%R with ((Z2R (Zfloor x) + 1 - x) * 2)%R by ring.
+replace (Z2R (Zfloor x) + 1 - x + (x - Z2R (Zfloor x)))%R with (/2 * 2)%R by field.
+apply Rmult_lt_compat_r with (2 := H1).
+now apply (Z2R_lt 0 2).
+Qed.
+
+Theorem Znearest_N_strict :
+ forall x,
+ (x - Z2R (Zfloor x) <> /2)%R ->
+ (Rabs (x - Z2R (Znearest x)) < /2)%R.
+Proof.
+intros x Hx.
+unfold Znearest.
+case Rcompare_spec ; intros H.
+rewrite Rabs_pos_eq.
+exact H.
+apply Rle_0_minus.
+apply Zfloor_lb.
+now elim Hx.
+rewrite Rabs_left1.
+rewrite Ropp_minus_distr.
+rewrite Zceil_floor_neq.
+rewrite Z2R_plus.
+simpl.
+apply Ropp_lt_cancel.
+apply Rplus_lt_reg_r with R1.
+replace (1 + -/2)%R with (/2)%R by field.
+now replace (1 + - (Z2R (Zfloor x) + 1 - x))%R with (x - Z2R (Zfloor x))%R by ring.
+apply Rlt_not_eq.
+apply Rplus_lt_reg_r with (- Z2R (Zfloor x))%R.
+apply Rlt_trans with (/2)%R.
+rewrite Rplus_opp_l.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+now rewrite <- (Rplus_comm x).
+apply Rle_minus.
+apply Zceil_ub.
+Qed.
+
+Theorem Znearest_N :
+ forall x,
+ (Rabs (x - Z2R (Znearest x)) <= /2)%R.
+Proof.
+intros x.
+destruct (Req_dec (x - Z2R (Zfloor x)) (/2)) as [Hx|Hx].
+assert (K: (Rabs (/2) <= /2)%R).
+apply Req_le.
+apply Rabs_pos_eq.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+destruct (Znearest_DN_or_UP x) as [H|H] ; rewrite H ; clear H.
+now rewrite Hx.
+rewrite Zceil_floor_neq.
+rewrite Z2R_plus.
+simpl.
+replace (x - (Z2R (Zfloor x) + 1))%R with (x - Z2R (Zfloor x) - 1)%R by ring.
+rewrite Hx.
+rewrite Rabs_minus_sym.
+now replace (1 - /2)%R with (/2)%R by field.
+apply Rlt_not_eq.
+apply Rplus_lt_reg_r with (- Z2R (Zfloor x))%R.
+rewrite Rplus_opp_l, Rplus_comm.
+fold (x - Z2R (Zfloor x))%R.
+rewrite Hx.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply Rlt_le.
+now apply Znearest_N_strict.
+Qed.
+
+Theorem Znearest_imp :
+ forall x n,
+ (Rabs (x - Z2R n) < /2)%R ->
+ Znearest x = n.
+Proof.
+intros x n Hd.
+cut (Zabs (Znearest x - n) < 1)%Z.
+clear ; zify ; omega.
+apply lt_Z2R.
+rewrite Z2R_abs, Z2R_minus.
+replace (Z2R (Znearest x) - Z2R n)%R with (- (x - Z2R (Znearest x)) + (x - Z2R n))%R by ring.
+apply Rle_lt_trans with (1 := Rabs_triang _ _).
+simpl.
+replace R1 with (/2 + /2)%R by field.
+apply Rplus_le_lt_compat with (2 := Hd).
+rewrite Rabs_Ropp.
+apply Znearest_N.
+Qed.
+
+Theorem round_N_pt :
+ forall x,
+ Rnd_N_pt generic_format x (round Znearest x).
+Proof.
+intros x.
+set (d := round Zfloor x).
+set (u := round Zceil x).
+set (mx := scaled_mantissa x).
+set (bx := bpow (canonic_exp x)).
+(* . *)
+assert (H: (Rabs (round Znearest x - x) <= Rmin (x - d) (u - x))%R).
+pattern x at -1 ; rewrite <- scaled_mantissa_mult_bpow.
+unfold d, u, round, F2R. simpl.
+fold mx bx.
+rewrite <- 3!Rmult_minus_distr_r.
+rewrite Rabs_mult, (Rabs_pos_eq bx). 2: apply bpow_ge_0.
+rewrite <- Rmult_min_distr_r. 2: apply bpow_ge_0.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+unfold Znearest.
+destruct (Req_dec (Z2R (Zfloor mx)) mx) as [Hm|Hm].
+(* .. *)
+rewrite Hm.
+unfold Rminus at 2.
+rewrite Rplus_opp_r.
+rewrite Rcompare_Lt.
+rewrite Hm.
+unfold Rminus at -3.
+rewrite Rplus_opp_r.
+rewrite Rabs_R0.
+unfold Rmin.
+destruct (Rle_dec 0 (Z2R (Zceil mx) - mx)) as [H|H].
+apply Rle_refl.
+apply Rle_0_minus.
+apply Zceil_ub.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+(* .. *)
+rewrite Rcompare_floor_ceil_mid with (1 := Hm).
+rewrite Rmin_compare.
+assert (H: (Rabs (mx - Z2R (Zfloor mx)) <= mx - Z2R (Zfloor mx))%R).
+rewrite Rabs_pos_eq.
+apply Rle_refl.
+apply Rle_0_minus.
+apply Zfloor_lb.
+case Rcompare_spec ; intros Hm'.
+now rewrite Rabs_minus_sym.
+case choice.
+rewrite <- Hm'.
+exact H.
+now rewrite Rabs_minus_sym.
+rewrite Rabs_pos_eq.
+apply Rle_refl.
+apply Rle_0_minus.
+apply Zceil_ub.
+(* . *)
+apply Rnd_DN_UP_pt_N with d u.
+apply generic_format_round.
+auto with typeclass_instances.
+now apply round_DN_pt.
+now apply round_UP_pt.
+apply Rle_trans with (1 := H).
+apply Rmin_l.
+apply Rle_trans with (1 := H).
+apply Rmin_r.
+Qed.
+
+Theorem round_N_middle :
+ forall x,
+ (x - round Zfloor x = round Zceil x - x)%R ->
+ round Znearest x = if choice (Zfloor (scaled_mantissa x)) then round Zceil x else round Zfloor x.
+Proof.
+intros x.
+pattern x at 1 4 ; rewrite <- scaled_mantissa_mult_bpow.
+unfold round, Znearest, F2R. simpl.
+destruct (Req_dec (Z2R (Zfloor (scaled_mantissa x))) (scaled_mantissa x)) as [Fx|Fx].
+(* *)
+intros _.
+rewrite <- Fx.
+rewrite Zceil_Z2R, Zfloor_Z2R.
+set (m := Zfloor (scaled_mantissa x)).
+now case (Rcompare (Z2R m - Z2R m) (/ 2)) ; case (choice m).
+(* *)
+intros H.
+rewrite Rcompare_floor_ceil_mid with (1 := Fx).
+rewrite Rcompare_Eq.
+now case choice.
+apply Rmult_eq_reg_r with (bpow (canonic_exp x)).
+now rewrite 2!Rmult_minus_distr_r.
+apply Rgt_not_eq.
+apply bpow_gt_0.
+Qed.
+
+End Znearest.
+
+Section rndNA.
+
+Global Instance valid_rnd_NA : Valid_rnd (Znearest (Zle_bool 0)) := valid_rnd_N _.
+
+Theorem round_NA_pt :
+ forall x,
+ Rnd_NA_pt generic_format x (round (Znearest (Zle_bool 0)) x).
+Proof.
+intros x.
+generalize (round_N_pt (Zle_bool 0) x).
+set (f := round (Znearest (Zle_bool 0)) x).
+intros Rxf.
+destruct (Req_dec (x - round Zfloor x) (round Zceil x - x)) as [Hm|Hm].
+(* *)
+apply Rnd_NA_N_pt.
+exact generic_format_0.
+exact Rxf.
+destruct (Rle_or_lt 0 x) as [Hx|Hx].
+(* . *)
+rewrite Rabs_pos_eq with (1 := Hx).
+rewrite Rabs_pos_eq.
+unfold f.
+rewrite round_N_middle with (1 := Hm).
+rewrite Zle_bool_true.
+apply (round_UP_pt x).
+apply Zfloor_lub.
+apply Rmult_le_pos with (1 := Hx).
+apply bpow_ge_0.
+apply Rnd_N_pt_pos with (2 := Hx) (3 := Rxf).
+exact generic_format_0.
+(* . *)
+rewrite Rabs_left with (1 := Hx).
+rewrite Rabs_left1.
+apply Ropp_le_contravar.
+unfold f.
+rewrite round_N_middle with (1 := Hm).
+rewrite Zle_bool_false.
+apply (round_DN_pt x).
+apply lt_Z2R.
+apply Rle_lt_trans with (scaled_mantissa x).
+apply Zfloor_lb.
+simpl.
+rewrite <- (Rmult_0_l (bpow (- canonic_exp x))).
+apply Rmult_lt_compat_r with (2 := Hx).
+apply bpow_gt_0.
+apply Rnd_N_pt_neg with (3 := Rxf).
+exact generic_format_0.
+now apply Rlt_le.
+(* *)
+split.
+apply Rxf.
+intros g Rxg.
+rewrite Rnd_N_pt_unicity with (3 := Hm) (4 := Rxf) (5 := Rxg).
+apply Rle_refl.
+apply round_DN_pt.
+apply round_UP_pt.
+Qed.
+
+End rndNA.
+
+Section rndN_opp.
+
+Theorem Znearest_opp :
+ forall choice x,
+ Znearest choice (- x) = (- Znearest (fun t => negb (choice (- (t + 1))%Z)) x)%Z.
+Proof with auto with typeclass_instances.
+intros choice x.
+destruct (Req_dec (Z2R (Zfloor x)) x) as [Hx|Hx].
+rewrite <- Hx.
+rewrite <- Z2R_opp.
+rewrite 2!Zrnd_Z2R...
+unfold Znearest.
+replace (- x - Z2R (Zfloor (-x)))%R with (Z2R (Zceil x) - x)%R.
+rewrite Rcompare_ceil_floor_mid with (1 := Hx).
+rewrite Rcompare_floor_ceil_mid with (1 := Hx).
+rewrite Rcompare_sym.
+rewrite <- Zceil_floor_neq with (1 := Hx).
+unfold Zceil.
+rewrite Ropp_involutive.
+case Rcompare ; simpl ; trivial.
+rewrite Zopp_involutive.
+case (choice (Zfloor (- x))) ; simpl ; trivial.
+now rewrite Zopp_involutive.
+now rewrite Zopp_involutive.
+unfold Zceil.
+rewrite Z2R_opp.
+apply Rplus_comm.
+Qed.
+
+Theorem round_N_opp :
+ forall choice,
+ forall x,
+ round (Znearest choice) (-x) = (- round (Znearest (fun t => negb (choice (- (t + 1))%Z))) x)%R.
+Proof.
+intros choice x.
+unfold round, F2R. simpl.
+rewrite canonic_exp_opp.
+rewrite scaled_mantissa_opp.
+rewrite Znearest_opp.
+rewrite Z2R_opp.
+now rewrite Ropp_mult_distr_l_reverse.
+Qed.
+
+End rndN_opp.
+
+End Format.
+
+(** Inclusion of a format into an extended format *)
+Section Inclusion.
+
+Variables fexp1 fexp2 : Z -> Z.
+
+Context { valid_exp1 : Valid_exp fexp1 }.
+Context { valid_exp2 : Valid_exp fexp2 }.
+
+Theorem generic_inclusion_ln_beta :
+ forall x,
+ ( x <> R0 -> (fexp2 (ln_beta beta x) <= fexp1 (ln_beta beta x))%Z ) ->
+ generic_format fexp1 x ->
+ generic_format fexp2 x.
+Proof.
+intros x He Fx.
+rewrite Fx.
+apply generic_format_F2R.
+intros Zx.
+rewrite <- Fx.
+apply He.
+contradict Zx.
+rewrite Zx, scaled_mantissa_0.
+apply (Ztrunc_Z2R 0).
+Qed.
+
+Theorem generic_inclusion_lt_ge :
+ forall e1 e2,
+ ( forall e, (e1 < e <= e2)%Z -> (fexp2 e <= fexp1 e)%Z ) ->
+ forall x,
+ (bpow e1 <= Rabs x < bpow e2)%R ->
+ generic_format fexp1 x ->
+ generic_format fexp2 x.
+Proof.
+intros e1 e2 He x (Hx1,Hx2).
+apply generic_inclusion_ln_beta.
+intros Zx.
+apply He.
+split.
+now apply ln_beta_gt_bpow.
+now apply ln_beta_le_bpow.
+Qed.
+
+Theorem generic_inclusion :
+ forall e,
+ (fexp2 e <= fexp1 e)%Z ->
+ forall x,
+ (bpow (e - 1) <= Rabs x <= bpow e)%R ->
+ generic_format fexp1 x ->
+ generic_format fexp2 x.
+Proof with auto with typeclass_instances.
+intros e He x (Hx1,[Hx2|Hx2]).
+apply generic_inclusion_ln_beta.
+now rewrite ln_beta_unique with (1 := conj Hx1 Hx2).
+intros Fx.
+apply generic_format_abs_inv.
+rewrite Hx2.
+apply generic_format_bpow'...
+apply Zle_trans with (1 := He).
+apply generic_format_bpow_inv...
+rewrite <- Hx2.
+now apply generic_format_abs.
+Qed.
+
+Theorem generic_inclusion_le_ge :
+ forall e1 e2,
+ (e1 < e2)%Z ->
+ ( forall e, (e1 < e <= e2)%Z -> (fexp2 e <= fexp1 e)%Z ) ->
+ forall x,
+ (bpow e1 <= Rabs x <= bpow e2)%R ->
+ generic_format fexp1 x ->
+ generic_format fexp2 x.
+Proof.
+intros e1 e2 He' He x (Hx1,[Hx2|Hx2]).
+(* *)
+apply generic_inclusion_ln_beta.
+intros Zx.
+apply He.
+split.
+now apply ln_beta_gt_bpow.
+now apply ln_beta_le_bpow.
+(* *)
+apply generic_inclusion with (e := e2).
+apply He.
+split.
+apply He'.
+apply Zle_refl.
+rewrite Hx2.
+split.
+apply bpow_le.
+apply Zle_pred.
+apply Rle_refl.
+Qed.
+
+Theorem generic_inclusion_le :
+ forall e2,
+ ( forall e, (e <= e2)%Z -> (fexp2 e <= fexp1 e)%Z ) ->
+ forall x,
+ (Rabs x <= bpow e2)%R ->
+ generic_format fexp1 x ->
+ generic_format fexp2 x.
+Proof.
+intros e2 He x [Hx|Hx].
+apply generic_inclusion_ln_beta.
+intros Zx.
+apply He.
+now apply ln_beta_le_bpow.
+apply generic_inclusion with (e := e2).
+apply He.
+apply Zle_refl.
+rewrite Hx.
+split.
+apply bpow_le.
+apply Zle_pred.
+apply Rle_refl.
+Qed.
+
+Theorem generic_inclusion_ge :
+ forall e1,
+ ( forall e, (e1 < e)%Z -> (fexp2 e <= fexp1 e)%Z ) ->
+ forall x,
+ (bpow e1 <= Rabs x)%R ->
+ generic_format fexp1 x ->
+ generic_format fexp2 x.
+Proof.
+intros e1 He x Hx.
+apply generic_inclusion_ln_beta.
+intros Zx.
+apply He.
+now apply ln_beta_gt_bpow.
+Qed.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem generic_round_generic :
+ forall x,
+ generic_format fexp1 x ->
+ generic_format fexp1 (round fexp2 rnd x).
+Proof with auto with typeclass_instances.
+intros x Gx.
+apply generic_format_abs_inv.
+apply generic_format_abs in Gx.
+revert rnd valid_rnd x Gx.
+refine (round_abs_abs' _ (fun x y => generic_format fexp1 x -> generic_format fexp1 y) _).
+intros rnd valid_rnd x [Hx|Hx] Gx.
+(* x > 0 *)
+generalize (ln_beta_generic_gt _ x (Rgt_not_eq _ _ Hx) Gx).
+unfold canonic_exp.
+destruct (ln_beta beta x) as (ex,Ex) ; simpl.
+specialize (Ex (Rgt_not_eq _ _ Hx)).
+intros He'.
+rewrite Rabs_pos_eq in Ex by now apply Rlt_le.
+destruct (Zle_or_lt ex (fexp2 ex)) as [He|He].
+(* - x near 0 for fexp2 *)
+destruct (round_bounded_small_pos fexp2 rnd x ex He Ex) as [Hr|Hr].
+rewrite Hr.
+apply generic_format_0.
+rewrite Hr.
+apply generic_format_bpow'...
+apply Zlt_le_weak.
+apply valid_exp_large with ex...
+(* - x large for fexp2 *)
+destruct (Zle_or_lt (canonic_exp fexp2 x) (canonic_exp fexp1 x)) as [He''|He''].
+(* - - round fexp2 x is representable for fexp1 *)
+rewrite round_generic...
+rewrite Gx.
+apply generic_format_F2R.
+fold (round fexp1 Ztrunc x).
+intros Zx.
+unfold canonic_exp at 1.
+rewrite ln_beta_round_ZR...
+contradict Zx.
+apply F2R_eq_0_reg with (1 := Zx).
+(* - - round fexp2 x has too many digits for fexp1 *)
+destruct (round_bounded_large_pos fexp2 rnd x ex He Ex) as (Hr1,[Hr2|Hr2]).
+apply generic_format_F2R.
+intros Zx.
+fold (round fexp2 rnd x).
+unfold canonic_exp at 1.
+rewrite ln_beta_unique_pos with (1 := conj Hr1 Hr2).
+rewrite <- ln_beta_unique_pos with (1 := Ex).
+now apply Zlt_le_weak.
+rewrite Hr2.
+apply generic_format_bpow'...
+now apply Zlt_le_weak.
+(* x = 0 *)
+rewrite <- Hx, round_0...
+apply generic_format_0.
+Qed.
+
+End Inclusion.
+
+End Generic.
+
+Notation ZnearestA := (Znearest (Zle_bool 0)).
+
+(** Notations for backward-compatibility with Flocq 1.4. *)
+Notation rndDN := Zfloor (only parsing).
+Notation rndUP := Zceil (only parsing).
+Notation rndZR := Ztrunc (only parsing).
+Notation rndNA := ZnearestA (only parsing).
diff --git a/flocq/Core/Fcore_rnd.v b/flocq/Core/Fcore_rnd.v
new file mode 100644
index 0000000..6b4d807
--- /dev/null
+++ b/flocq/Core/Fcore_rnd.v
@@ -0,0 +1,1394 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Roundings: properties and/or functions *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+
+Section RND_prop.
+
+Open Scope R_scope.
+
+Theorem round_val_of_pred :
+ forall rnd : R -> R -> Prop,
+ round_pred rnd ->
+ forall x, { f : R | rnd x f }.
+Proof.
+intros rnd (H1,H2) x.
+specialize (H1 x).
+(* . *)
+assert (H3 : bound (rnd x)).
+destruct H1 as (f, H1).
+exists f.
+intros g Hg.
+now apply H2 with (3 := Rle_refl x).
+(* . *)
+exists (projT1 (completeness _ H3 H1)).
+destruct completeness as (f1, (H4, H5)).
+simpl.
+destruct H1 as (f2, H1).
+assert (f1 = f2).
+apply Rle_antisym.
+apply H5.
+intros f3 H.
+now apply H2 with (3 := Rle_refl x).
+now apply H4.
+now rewrite H.
+Qed.
+
+Theorem round_fun_of_pred :
+ forall rnd : R -> R -> Prop,
+ round_pred rnd ->
+ { f : R -> R | forall x, rnd x (f x) }.
+Proof.
+intros rnd H.
+exists (fun x => projT1 (round_val_of_pred rnd H x)).
+intros x.
+now destruct round_val_of_pred as (f, H1).
+Qed.
+
+Theorem round_unicity :
+ forall rnd : R -> R -> Prop,
+ round_pred_monotone rnd ->
+ forall x f1 f2,
+ rnd x f1 ->
+ rnd x f2 ->
+ f1 = f2.
+Proof.
+intros rnd Hr x f1 f2 H1 H2.
+apply Rle_antisym.
+now apply Hr with (3 := Rle_refl x).
+now apply Hr with (3 := Rle_refl x).
+Qed.
+
+Theorem Rnd_DN_pt_monotone :
+ forall F : R -> Prop,
+ round_pred_monotone (Rnd_DN_pt F).
+Proof.
+intros F x y f g (Hx1,(Hx2,_)) (Hy1,(_,Hy2)) Hxy.
+apply Hy2.
+apply Hx1.
+now apply Rle_trans with (2 := Hxy).
+Qed.
+
+Theorem Rnd_DN_pt_unicity :
+ forall F : R -> Prop,
+ forall x f1 f2 : R,
+ Rnd_DN_pt F x f1 -> Rnd_DN_pt F x f2 ->
+ f1 = f2.
+Proof.
+intros F.
+apply round_unicity.
+apply Rnd_DN_pt_monotone.
+Qed.
+
+Theorem Rnd_DN_unicity :
+ forall F : R -> Prop,
+ forall rnd1 rnd2 : R -> R,
+ Rnd_DN F rnd1 -> Rnd_DN F rnd2 ->
+ forall x, rnd1 x = rnd2 x.
+Proof.
+intros F rnd1 rnd2 H1 H2 x.
+now eapply Rnd_DN_pt_unicity.
+Qed.
+
+Theorem Rnd_UP_pt_monotone :
+ forall F : R -> Prop,
+ round_pred_monotone (Rnd_UP_pt F).
+Proof.
+intros F x y f g (Hx1,(_,Hx2)) (Hy1,(Hy2,_)) Hxy.
+apply Hx2.
+apply Hy1.
+now apply Rle_trans with (1 := Hxy).
+Qed.
+
+Theorem Rnd_UP_pt_unicity :
+ forall F : R -> Prop,
+ forall x f1 f2 : R,
+ Rnd_UP_pt F x f1 -> Rnd_UP_pt F x f2 ->
+ f1 = f2.
+Proof.
+intros F.
+apply round_unicity.
+apply Rnd_UP_pt_monotone.
+Qed.
+
+Theorem Rnd_UP_unicity :
+ forall F : R -> Prop,
+ forall rnd1 rnd2 : R -> R,
+ Rnd_UP F rnd1 -> Rnd_UP F rnd2 ->
+ forall x, rnd1 x = rnd2 x.
+Proof.
+intros F rnd1 rnd2 H1 H2 x.
+now eapply Rnd_UP_pt_unicity.
+Qed.
+
+Theorem Rnd_DN_UP_pt_sym :
+ forall F : R -> Prop,
+ ( forall x, F x -> F (- x) ) ->
+ forall x f : R,
+ Rnd_DN_pt F x f -> Rnd_UP_pt F (-x) (-f).
+Proof.
+intros F HF x f H.
+repeat split.
+apply HF.
+apply H.
+apply Ropp_le_contravar.
+apply H.
+intros g Hg.
+rewrite <- (Ropp_involutive g).
+intros Hxg.
+apply Ropp_le_contravar.
+apply H.
+now apply HF.
+now apply Ropp_le_cancel.
+Qed.
+
+Theorem Rnd_UP_DN_pt_sym :
+ forall F : R -> Prop,
+ ( forall x, F x -> F (- x) ) ->
+ forall x f : R,
+ Rnd_UP_pt F x f -> Rnd_DN_pt F (-x) (-f).
+Proof.
+intros F HF x f H.
+repeat split.
+apply HF.
+apply H.
+apply Ropp_le_contravar.
+apply H.
+intros g Hg.
+rewrite <- (Ropp_involutive g).
+intros Hxg.
+apply Ropp_le_contravar.
+apply H.
+now apply HF.
+now apply Ropp_le_cancel.
+Qed.
+
+Theorem Rnd_DN_UP_sym :
+ forall F : R -> Prop,
+ ( forall x, F x -> F (- x) ) ->
+ forall rnd1 rnd2 : R -> R,
+ Rnd_DN F rnd1 -> Rnd_UP F rnd2 ->
+ forall x, rnd1 (- x) = - rnd2 x.
+Proof.
+intros F HF rnd1 rnd2 H1 H2 x.
+rewrite <- (Ropp_involutive (rnd1 (-x))).
+apply f_equal.
+apply (Rnd_UP_unicity F (fun x => - rnd1 (-x))) ; trivial.
+intros y.
+pattern y at 1 ; rewrite <- Ropp_involutive.
+apply Rnd_DN_UP_pt_sym.
+apply HF.
+apply H1.
+Qed.
+
+Theorem Rnd_DN_UP_pt_split :
+ forall F : R -> Prop,
+ forall x d u,
+ Rnd_DN_pt F x d ->
+ Rnd_UP_pt F x u ->
+ forall f, F f ->
+ (f <= d) \/ (u <= f).
+Proof.
+intros F x d u Hd Hu f Hf.
+destruct (Rle_or_lt f x).
+left.
+now apply Hd.
+right.
+assert (H' := Rlt_le _ _ H).
+now apply Hu.
+Qed.
+
+Theorem Rnd_DN_pt_refl :
+ forall F : R -> Prop,
+ forall x : R, F x ->
+ Rnd_DN_pt F x x.
+Proof.
+intros F x Hx.
+repeat split.
+exact Hx.
+apply Rle_refl.
+now intros.
+Qed.
+
+Theorem Rnd_DN_pt_idempotent :
+ forall F : R -> Prop,
+ forall x f : R,
+ Rnd_DN_pt F x f -> F x ->
+ f = x.
+Proof.
+intros F x f (_,(Hx1,Hx2)) Hx.
+apply Rle_antisym.
+exact Hx1.
+apply Hx2.
+exact Hx.
+apply Rle_refl.
+Qed.
+
+Theorem Rnd_UP_pt_refl :
+ forall F : R -> Prop,
+ forall x : R, F x ->
+ Rnd_UP_pt F x x.
+Proof.
+intros F x Hx.
+repeat split.
+exact Hx.
+apply Rle_refl.
+now intros.
+Qed.
+
+Theorem Rnd_UP_pt_idempotent :
+ forall F : R -> Prop,
+ forall x f : R,
+ Rnd_UP_pt F x f -> F x ->
+ f = x.
+Proof.
+intros F x f (_,(Hx1,Hx2)) Hx.
+apply Rle_antisym.
+apply Hx2.
+exact Hx.
+apply Rle_refl.
+exact Hx1.
+Qed.
+
+Theorem Only_DN_or_UP :
+ forall F : R -> Prop,
+ forall x fd fu f : R,
+ Rnd_DN_pt F x fd -> Rnd_UP_pt F x fu ->
+ F f -> (fd <= f <= fu)%R ->
+ f = fd \/ f = fu.
+Proof.
+intros F x fd fu f Hd Hu Hf ([Hdf|Hdf], Hfu).
+2 : now left.
+destruct Hfu.
+2 : now right.
+destruct (Rle_or_lt x f).
+elim Rlt_not_le with (1 := H).
+now apply Hu.
+elim Rlt_not_le with (1 := Hdf).
+apply Hd ; auto with real.
+Qed.
+
+Theorem Rnd_ZR_abs :
+ forall (F : R -> Prop) (rnd: R-> R),
+ Rnd_ZR F rnd ->
+ forall x : R, (Rabs (rnd x) <= Rabs x)%R.
+Proof.
+intros F rnd H x.
+assert (F 0%R).
+replace 0%R with (rnd 0%R).
+eapply H.
+apply Rle_refl.
+destruct (H 0%R) as (H1, H2).
+apply Rle_antisym.
+apply H1.
+apply Rle_refl.
+apply H2.
+apply Rle_refl.
+(* . *)
+destruct (Rle_or_lt 0 x).
+(* positive *)
+rewrite Rabs_right.
+rewrite Rabs_right; auto with real.
+now apply (proj1 (H x)).
+apply Rle_ge.
+now apply (proj1 (H x)).
+(* negative *)
+rewrite Rabs_left1.
+rewrite Rabs_left1 ; auto with real.
+apply Ropp_le_contravar.
+apply (proj2 (H x)).
+auto with real.
+apply (proj2 (H x)) ; auto with real.
+Qed.
+
+Theorem Rnd_ZR_pt_monotone :
+ forall F : R -> Prop, F 0 ->
+ round_pred_monotone (Rnd_ZR_pt F).
+Proof.
+intros F F0 x y f g (Hx1, Hx2) (Hy1, Hy2) Hxy.
+destruct (Rle_or_lt 0 x) as [Hx|Hx].
+(* . *)
+apply Hy1.
+now apply Rle_trans with x.
+now apply Hx1.
+apply Rle_trans with (2 := Hxy).
+now apply Hx1.
+(* . *)
+apply Rlt_le in Hx.
+destruct (Rle_or_lt 0 y) as [Hy|Hy].
+apply Rle_trans with 0.
+now apply Hx2.
+now apply Hy1.
+apply Rlt_le in Hy.
+apply Hx2.
+exact Hx.
+now apply Hy2.
+apply Rle_trans with (1 := Hxy).
+now apply Hy2.
+Qed.
+
+Theorem Rnd_N_pt_DN_or_UP :
+ forall F : R -> Prop,
+ forall x f : R,
+ Rnd_N_pt F x f ->
+ Rnd_DN_pt F x f \/ Rnd_UP_pt F x f.
+Proof.
+intros F x f (Hf1,Hf2).
+destruct (Rle_or_lt x f) as [Hxf|Hxf].
+(* . *)
+right.
+repeat split ; try assumption.
+intros g Hg Hxg.
+specialize (Hf2 g Hg).
+rewrite 2!Rabs_pos_eq in Hf2.
+now apply Rplus_le_reg_r with (-x)%R.
+now apply Rle_0_minus.
+now apply Rle_0_minus.
+(* . *)
+left.
+repeat split ; try assumption.
+now apply Rlt_le.
+intros g Hg Hxg.
+specialize (Hf2 g Hg).
+rewrite 2!Rabs_left1 in Hf2.
+generalize (Ropp_le_cancel _ _ Hf2).
+intros H.
+now apply Rplus_le_reg_r with (-x)%R.
+now apply Rle_minus.
+apply Rlt_le.
+now apply Rlt_minus.
+Qed.
+
+Theorem Rnd_N_pt_DN_or_UP_eq :
+ forall F : R -> Prop,
+ forall x fd fu f : R,
+ Rnd_DN_pt F x fd -> Rnd_UP_pt F x fu ->
+ Rnd_N_pt F x f ->
+ f = fd \/ f = fu.
+Proof.
+intros F x fd fu f Hd Hu Hf.
+destruct (Rnd_N_pt_DN_or_UP F x f Hf) as [H|H].
+left.
+apply Rnd_DN_pt_unicity with (1 := H) (2 := Hd).
+right.
+apply Rnd_UP_pt_unicity with (1 := H) (2 := Hu).
+Qed.
+
+Theorem Rnd_N_pt_sym :
+ forall F : R -> Prop,
+ ( forall x, F x -> F (- x) ) ->
+ forall x f : R,
+ Rnd_N_pt F (-x) (-f) -> Rnd_N_pt F x f.
+Proof.
+intros F HF x f (H1,H2).
+rewrite <- (Ropp_involutive f).
+repeat split.
+apply HF.
+apply H1.
+intros g H3.
+rewrite Ropp_involutive.
+replace (f - x)%R with (-(-f - -x))%R by ring.
+replace (g - x)%R with (-(-g - -x))%R by ring.
+rewrite 2!Rabs_Ropp.
+apply H2.
+now apply HF.
+Qed.
+
+Theorem Rnd_N_pt_monotone :
+ forall F : R -> Prop,
+ forall x y f g : R,
+ Rnd_N_pt F x f -> Rnd_N_pt F y g ->
+ x < y -> f <= g.
+Proof.
+intros F x y f g (Hf,Hx) (Hg,Hy) Hxy.
+apply Rnot_lt_le.
+intros Hgf.
+assert (Hfgx := Hx g Hg).
+assert (Hgfy := Hy f Hf).
+clear F Hf Hg Hx Hy.
+destruct (Rle_or_lt x g) as [Hxg|Hgx].
+(* x <= g < f *)
+apply Rle_not_lt with (1 := Hfgx).
+rewrite 2!Rabs_pos_eq.
+now apply Rplus_lt_compat_r.
+apply Rle_0_minus.
+apply Rlt_le.
+now apply Rle_lt_trans with (1 := Hxg).
+now apply Rle_0_minus.
+assert (Hgy := Rlt_trans _ _ _ Hgx Hxy).
+destruct (Rle_or_lt f y) as [Hfy|Hyf].
+(* g < f <= y *)
+apply Rle_not_lt with (1 := Hgfy).
+rewrite (Rabs_left (g - y)).
+2: now apply Rlt_minus.
+rewrite Rabs_left1.
+apply Ropp_lt_contravar.
+now apply Rplus_lt_compat_r.
+now apply Rle_minus.
+(* g < x < y < f *)
+rewrite Rabs_left, Rabs_pos_eq, Ropp_minus_distr in Hgfy.
+rewrite Rabs_pos_eq, Rabs_left, Ropp_minus_distr in Hfgx.
+apply Rle_not_lt with (1 := Rplus_le_compat _ _ _ _ Hfgx Hgfy).
+apply Rminus_lt.
+ring_simplify.
+apply Rlt_minus.
+apply Rmult_lt_compat_l.
+now apply (Z2R_lt 0 2).
+exact Hxy.
+now apply Rlt_minus.
+apply Rle_0_minus.
+apply Rlt_le.
+now apply Rlt_trans with (1 := Hxy).
+apply Rle_0_minus.
+now apply Rlt_le.
+now apply Rlt_minus.
+Qed.
+
+Theorem Rnd_N_pt_unicity :
+ forall F : R -> Prop,
+ forall x d u f1 f2 : R,
+ Rnd_DN_pt F x d ->
+ Rnd_UP_pt F x u ->
+ x - d <> u - x ->
+ Rnd_N_pt F x f1 ->
+ Rnd_N_pt F x f2 ->
+ f1 = f2.
+Proof.
+intros F x d u f1 f2 Hd Hu Hdu.
+assert (forall f1 f2, Rnd_N_pt F x f1 -> Rnd_N_pt F x f2 -> f1 < f2 -> False).
+clear f1 f2. intros f1 f2 Hf1 Hf2 H12.
+destruct (Rnd_N_pt_DN_or_UP F x f1 Hf1) as [Hd1|Hu1] ;
+ destruct (Rnd_N_pt_DN_or_UP F x f2 Hf2) as [Hd2|Hu2].
+apply Rlt_not_eq with (1 := H12).
+now apply Rnd_DN_pt_unicity with (1 := Hd1).
+apply Hdu.
+rewrite Rnd_DN_pt_unicity with (1 := Hd) (2 := Hd1).
+rewrite Rnd_UP_pt_unicity with (1 := Hu) (2 := Hu2).
+rewrite <- (Rabs_pos_eq (x - f1)).
+rewrite <- (Rabs_pos_eq (f2 - x)).
+rewrite Rabs_minus_sym.
+apply Rle_antisym.
+apply Hf1. apply Hf2.
+apply Hf2. apply Hf1.
+apply Rle_0_minus.
+apply Hu2.
+apply Rle_0_minus.
+apply Hd1.
+apply Rlt_not_le with (1 := H12).
+apply Rle_trans with x.
+apply Hd2.
+apply Hu1.
+apply Rgt_not_eq with (1 := H12).
+now apply Rnd_UP_pt_unicity with (1 := Hu2).
+intros Hf1 Hf2.
+now apply Rle_antisym ; apply Rnot_lt_le ; refine (H _ _ _ _).
+Qed.
+
+Theorem Rnd_N_pt_refl :
+ forall F : R -> Prop,
+ forall x : R, F x ->
+ Rnd_N_pt F x x.
+Proof.
+intros F x Hx.
+repeat split.
+exact Hx.
+intros g _.
+unfold Rminus at 1.
+rewrite Rplus_opp_r, Rabs_R0.
+apply Rabs_pos.
+Qed.
+
+Theorem Rnd_N_pt_idempotent :
+ forall F : R -> Prop,
+ forall x f : R,
+ Rnd_N_pt F x f -> F x ->
+ f = x.
+Proof.
+intros F x f (_,Hf) Hx.
+apply Rminus_diag_uniq.
+destruct (Req_dec (f - x) 0) as [H|H].
+exact H.
+elim Rabs_no_R0 with (1 := H).
+apply Rle_antisym.
+replace 0 with (Rabs (x - x)).
+now apply Hf.
+unfold Rminus.
+rewrite Rplus_opp_r.
+apply Rabs_R0.
+apply Rabs_pos.
+Qed.
+
+Theorem Rnd_N_pt_0 :
+ forall F : R -> Prop,
+ F 0 ->
+ Rnd_N_pt F 0 0.
+Proof.
+intros F HF.
+split.
+exact HF.
+intros g _.
+rewrite 2!Rminus_0_r, Rabs_R0.
+apply Rabs_pos.
+Qed.
+
+Theorem Rnd_N_pt_pos :
+ forall F : R -> Prop, F 0 ->
+ forall x f, 0 <= x ->
+ Rnd_N_pt F x f ->
+ 0 <= f.
+Proof.
+intros F HF x f [Hx|Hx] Hxf.
+eapply Rnd_N_pt_monotone ; try eassumption.
+now apply Rnd_N_pt_0.
+right.
+apply sym_eq.
+apply Rnd_N_pt_idempotent with F.
+now rewrite Hx.
+exact HF.
+Qed.
+
+Theorem Rnd_N_pt_neg :
+ forall F : R -> Prop, F 0 ->
+ forall x f, x <= 0 ->
+ Rnd_N_pt F x f ->
+ f <= 0.
+Proof.
+intros F HF x f [Hx|Hx] Hxf.
+eapply Rnd_N_pt_monotone ; try eassumption.
+now apply Rnd_N_pt_0.
+right.
+apply Rnd_N_pt_idempotent with F.
+now rewrite <- Hx.
+exact HF.
+Qed.
+
+Theorem Rnd_N_pt_abs :
+ forall F : R -> Prop,
+ F 0 ->
+ ( forall x, F x -> F (- x) ) ->
+ forall x f : R,
+ Rnd_N_pt F x f -> Rnd_N_pt F (Rabs x) (Rabs f).
+Proof.
+intros F HF0 HF x f Hxf.
+unfold Rabs at 1.
+destruct (Rcase_abs x) as [Hx|Hx].
+rewrite Rabs_left1.
+apply Rnd_N_pt_sym.
+exact HF.
+now rewrite 2!Ropp_involutive.
+apply Rnd_N_pt_neg with (3 := Hxf).
+exact HF0.
+now apply Rlt_le.
+rewrite Rabs_pos_eq.
+exact Hxf.
+apply Rnd_N_pt_pos with (3 := Hxf).
+exact HF0.
+now apply Rge_le.
+Qed.
+
+Theorem Rnd_DN_UP_pt_N :
+ forall F : R -> Prop,
+ forall x d u f : R,
+ F f ->
+ Rnd_DN_pt F x d ->
+ Rnd_UP_pt F x u ->
+ (Rabs (f - x) <= x - d)%R ->
+ (Rabs (f - x) <= u - x)%R ->
+ Rnd_N_pt F x f.
+Proof.
+intros F x d u f Hf Hxd Hxu Hd Hu.
+split.
+exact Hf.
+intros g Hg.
+destruct (Rnd_DN_UP_pt_split F x d u Hxd Hxu g Hg) as [Hgd|Hgu].
+(* g <= d *)
+apply Rle_trans with (1 := Hd).
+rewrite Rabs_left1.
+rewrite Ropp_minus_distr.
+apply Rplus_le_compat_l.
+now apply Ropp_le_contravar.
+apply Rle_minus.
+apply Rle_trans with (1 := Hgd).
+apply Hxd.
+(* u <= g *)
+apply Rle_trans with (1 := Hu).
+rewrite Rabs_pos_eq.
+now apply Rplus_le_compat_r.
+apply Rle_0_minus.
+apply Rle_trans with (2 := Hgu).
+apply Hxu.
+Qed.
+
+Theorem Rnd_DN_pt_N :
+ forall F : R -> Prop,
+ forall x d u : R,
+ Rnd_DN_pt F x d ->
+ Rnd_UP_pt F x u ->
+ (x - d <= u - x)%R ->
+ Rnd_N_pt F x d.
+Proof.
+intros F x d u Hd Hu Hx.
+assert (Hdx: (Rabs (d - x) = x - d)%R).
+rewrite Rabs_minus_sym.
+apply Rabs_pos_eq.
+apply Rle_0_minus.
+apply Hd.
+apply Rnd_DN_UP_pt_N with (2 := Hd) (3 := Hu).
+apply Hd.
+rewrite Hdx.
+apply Rle_refl.
+now rewrite Hdx.
+Qed.
+
+Theorem Rnd_UP_pt_N :
+ forall F : R -> Prop,
+ forall x d u : R,
+ Rnd_DN_pt F x d ->
+ Rnd_UP_pt F x u ->
+ (u - x <= x - d)%R ->
+ Rnd_N_pt F x u.
+Proof.
+intros F x d u Hd Hu Hx.
+assert (Hux: (Rabs (u - x) = u - x)%R).
+apply Rabs_pos_eq.
+apply Rle_0_minus.
+apply Hu.
+apply Rnd_DN_UP_pt_N with (2 := Hd) (3 := Hu).
+apply Hu.
+now rewrite Hux.
+rewrite Hux.
+apply Rle_refl.
+Qed.
+
+Definition Rnd_NG_pt_unicity_prop F P :=
+ forall x d u,
+ Rnd_DN_pt F x d -> Rnd_N_pt F x d ->
+ Rnd_UP_pt F x u -> Rnd_N_pt F x u ->
+ P x d -> P x u -> d = u.
+
+Theorem Rnd_NG_pt_unicity :
+ forall (F : R -> Prop) (P : R -> R -> Prop),
+ Rnd_NG_pt_unicity_prop F P ->
+ forall x f1 f2 : R,
+ Rnd_NG_pt F P x f1 -> Rnd_NG_pt F P x f2 ->
+ f1 = f2.
+Proof.
+intros F P HP x f1 f2 (H1a,H1b) (H2a,H2b).
+destruct H1b as [H1b|H1b].
+destruct H2b as [H2b|H2b].
+destruct (Rnd_N_pt_DN_or_UP _ _ _ H1a) as [H1c|H1c] ;
+ destruct (Rnd_N_pt_DN_or_UP _ _ _ H2a) as [H2c|H2c].
+eapply Rnd_DN_pt_unicity ; eassumption.
+now apply (HP x f1 f2).
+apply sym_eq.
+now apply (HP x f2 f1 H2c H2a H1c H1a).
+eapply Rnd_UP_pt_unicity ; eassumption.
+now apply H2b.
+apply sym_eq.
+now apply H1b.
+Qed.
+
+Theorem Rnd_NG_pt_monotone :
+ forall (F : R -> Prop) (P : R -> R -> Prop),
+ Rnd_NG_pt_unicity_prop F P ->
+ round_pred_monotone (Rnd_NG_pt F P).
+Proof.
+intros F P HP x y f g (Hf,Hx) (Hg,Hy) [Hxy|Hxy].
+now apply Rnd_N_pt_monotone with F x y.
+apply Req_le.
+rewrite <- Hxy in Hg, Hy.
+eapply Rnd_NG_pt_unicity ; try split ; eassumption.
+Qed.
+
+Theorem Rnd_NG_pt_refl :
+ forall (F : R -> Prop) (P : R -> R -> Prop),
+ forall x, F x -> Rnd_NG_pt F P x x.
+Proof.
+intros F P x Hx.
+split.
+now apply Rnd_N_pt_refl.
+right.
+intros f2 Hf2.
+now apply Rnd_N_pt_idempotent with F.
+Qed.
+
+Theorem Rnd_NG_pt_sym :
+ forall (F : R -> Prop) (P : R -> R -> Prop),
+ ( forall x, F x -> F (-x) ) ->
+ ( forall x f, P x f -> P (-x) (-f) ) ->
+ forall x f : R,
+ Rnd_NG_pt F P (-x) (-f) -> Rnd_NG_pt F P x f.
+Proof.
+intros F P HF HP x f (H1,H2).
+split.
+now apply Rnd_N_pt_sym.
+destruct H2 as [H2|H2].
+left.
+rewrite <- (Ropp_involutive x), <- (Ropp_involutive f).
+now apply HP.
+right.
+intros f2 Hxf2.
+rewrite <- (Ropp_involutive f).
+rewrite <- H2 with (-f2).
+apply sym_eq.
+apply Ropp_involutive.
+apply Rnd_N_pt_sym.
+exact HF.
+now rewrite 2!Ropp_involutive.
+Qed.
+
+Theorem Rnd_NG_unicity :
+ forall (F : R -> Prop) (P : R -> R -> Prop),
+ Rnd_NG_pt_unicity_prop F P ->
+ forall rnd1 rnd2 : R -> R,
+ Rnd_NG F P rnd1 -> Rnd_NG F P rnd2 ->
+ forall x, rnd1 x = rnd2 x.
+Proof.
+intros F P HP rnd1 rnd2 H1 H2 x.
+now apply Rnd_NG_pt_unicity with F P x.
+Qed.
+
+Theorem Rnd_NA_NG_pt :
+ forall F : R -> Prop,
+ F 0 ->
+ forall x f,
+ Rnd_NA_pt F x f <-> Rnd_NG_pt F (fun x f => Rabs x <= Rabs f) x f.
+Proof.
+intros F HF x f.
+destruct (Rle_or_lt 0 x) as [Hx|Hx].
+(* *)
+split ; intros (H1, H2).
+(* . *)
+assert (Hf := Rnd_N_pt_pos F HF x f Hx H1).
+split.
+exact H1.
+destruct (Rnd_N_pt_DN_or_UP _ _ _ H1) as [H3|H3].
+(* . . *)
+right.
+intros f2 Hxf2.
+specialize (H2 _ Hxf2).
+destruct (Rnd_N_pt_DN_or_UP _ _ _ Hxf2) as [H4|H4].
+eapply Rnd_DN_pt_unicity ; eassumption.
+apply Rle_antisym.
+rewrite Rabs_pos_eq with (1 := Hf) in H2.
+rewrite Rabs_pos_eq in H2.
+exact H2.
+now apply Rnd_N_pt_pos with F x.
+apply Rle_trans with x.
+apply H3.
+apply H4.
+(* . . *)
+left.
+rewrite Rabs_pos_eq with (1 := Hf).
+rewrite Rabs_pos_eq with (1 := Hx).
+apply H3.
+(* . *)
+split.
+exact H1.
+intros f2 Hxf2.
+destruct H2 as [H2|H2].
+assert (Hf := Rnd_N_pt_pos F HF x f Hx H1).
+assert (Hf2 := Rnd_N_pt_pos F HF x f2 Hx Hxf2).
+rewrite 2!Rabs_pos_eq ; trivial.
+rewrite 2!Rabs_pos_eq in H2 ; trivial.
+destruct (Rnd_N_pt_DN_or_UP _ _ _ Hxf2) as [H3|H3].
+apply Rle_trans with (2 := H2).
+apply H3.
+apply H3.
+apply H1.
+apply H2.
+rewrite (H2 _ Hxf2).
+apply Rle_refl.
+(* *)
+assert (Hx' := Rlt_le _ _ Hx).
+clear Hx. rename Hx' into Hx.
+split ; intros (H1, H2).
+(* . *)
+assert (Hf := Rnd_N_pt_neg F HF x f Hx H1).
+split.
+exact H1.
+destruct (Rnd_N_pt_DN_or_UP _ _ _ H1) as [H3|H3].
+(* . . *)
+left.
+rewrite Rabs_left1 with (1 := Hf).
+rewrite Rabs_left1 with (1 := Hx).
+apply Ropp_le_contravar.
+apply H3.
+(* . . *)
+right.
+intros f2 Hxf2.
+specialize (H2 _ Hxf2).
+destruct (Rnd_N_pt_DN_or_UP _ _ _ Hxf2) as [H4|H4].
+apply Rle_antisym.
+apply Rle_trans with x.
+apply H4.
+apply H3.
+rewrite Rabs_left1 with (1 := Hf) in H2.
+rewrite Rabs_left1 in H2.
+now apply Ropp_le_cancel.
+now apply Rnd_N_pt_neg with F x.
+eapply Rnd_UP_pt_unicity ; eassumption.
+(* . *)
+split.
+exact H1.
+intros f2 Hxf2.
+destruct H2 as [H2|H2].
+assert (Hf := Rnd_N_pt_neg F HF x f Hx H1).
+assert (Hf2 := Rnd_N_pt_neg F HF x f2 Hx Hxf2).
+rewrite 2!Rabs_left1 ; trivial.
+rewrite 2!Rabs_left1 in H2 ; trivial.
+apply Ropp_le_contravar.
+apply Ropp_le_cancel in H2.
+destruct (Rnd_N_pt_DN_or_UP _ _ _ Hxf2) as [H3|H3].
+apply H3.
+apply H1.
+apply H2.
+apply Rle_trans with (1 := H2).
+apply H3.
+rewrite (H2 _ Hxf2).
+apply Rle_refl.
+Qed.
+
+Theorem Rnd_NA_pt_unicity_prop :
+ forall F : R -> Prop,
+ F 0 ->
+ Rnd_NG_pt_unicity_prop F (fun a b => (Rabs a <= Rabs b)%R).
+Proof.
+intros F HF x d u Hxd1 Hxd2 Hxu1 Hxu2 Hd Hu.
+apply Rle_antisym.
+apply Rle_trans with x.
+apply Hxd1.
+apply Hxu1.
+destruct (Rle_or_lt 0 x) as [Hx|Hx].
+apply Hxu1.
+apply Hxd1.
+rewrite Rabs_pos_eq with (1 := Hx) in Hd.
+rewrite Rabs_pos_eq in Hd.
+exact Hd.
+now apply Hxd1.
+apply Hxd1.
+apply Hxu1.
+rewrite Rabs_left with (1 := Hx) in Hu.
+rewrite Rabs_left1 in Hu.
+now apply Ropp_le_cancel.
+apply Hxu1.
+apply HF.
+now apply Rlt_le.
+Qed.
+
+Theorem Rnd_NA_pt_unicity :
+ forall F : R -> Prop,
+ F 0 ->
+ forall x f1 f2 : R,
+ Rnd_NA_pt F x f1 -> Rnd_NA_pt F x f2 ->
+ f1 = f2.
+Proof.
+intros F HF x f1 f2 H1 H2.
+apply (Rnd_NG_pt_unicity F _ (Rnd_NA_pt_unicity_prop F HF) x).
+now apply -> Rnd_NA_NG_pt.
+now apply -> Rnd_NA_NG_pt.
+Qed.
+
+Theorem Rnd_NA_N_pt :
+ forall F : R -> Prop,
+ F 0 ->
+ forall x f : R,
+ Rnd_N_pt F x f ->
+ (Rabs x <= Rabs f)%R ->
+ Rnd_NA_pt F x f.
+Proof.
+intros F HF x f Rxf Hxf.
+split.
+apply Rxf.
+intros g Rxg.
+destruct (Rabs_eq_Rabs (f - x) (g - x)) as [H|H].
+apply Rle_antisym.
+apply Rxf.
+apply Rxg.
+apply Rxg.
+apply Rxf.
+(* *)
+replace g with f.
+apply Rle_refl.
+apply Rplus_eq_reg_r with (1 := H).
+(* *)
+assert (g = 2 * x - f)%R.
+replace (2 * x - f)%R with (x - (f - x))%R by ring.
+rewrite H.
+ring.
+destruct (Rle_lt_dec 0 x) as [Hx|Hx].
+(* . *)
+revert Hxf.
+rewrite Rabs_pos_eq with (1 := Hx).
+rewrite 2!Rabs_pos_eq ; try ( apply (Rnd_N_pt_pos F HF x) ; assumption ).
+intros Hxf.
+rewrite H0.
+apply Rplus_le_reg_r with f.
+ring_simplify.
+apply Rmult_le_compat_l with (2 := Hxf).
+now apply (Z2R_le 0 2).
+(* . *)
+revert Hxf.
+apply Rlt_le in Hx.
+rewrite Rabs_left1 with (1 := Hx).
+rewrite 2!Rabs_left1 ; try ( apply (Rnd_N_pt_neg F HF x) ; assumption ).
+intros Hxf.
+rewrite H0.
+apply Ropp_le_contravar.
+apply Rplus_le_reg_r with f.
+ring_simplify.
+apply Rmult_le_compat_l.
+now apply (Z2R_le 0 2).
+now apply Ropp_le_cancel.
+Qed.
+
+Theorem Rnd_NA_unicity :
+ forall (F : R -> Prop),
+ F 0 ->
+ forall rnd1 rnd2 : R -> R,
+ Rnd_NA F rnd1 -> Rnd_NA F rnd2 ->
+ forall x, rnd1 x = rnd2 x.
+Proof.
+intros F HF rnd1 rnd2 H1 H2 x.
+now apply Rnd_NA_pt_unicity with F x.
+Qed.
+
+Theorem Rnd_NA_pt_monotone :
+ forall F : R -> Prop,
+ F 0 ->
+ round_pred_monotone (Rnd_NA_pt F).
+Proof.
+intros F HF x y f g Hxf Hyg Hxy.
+apply (Rnd_NG_pt_monotone F _ (Rnd_NA_pt_unicity_prop F HF) x y).
+now apply -> Rnd_NA_NG_pt.
+now apply -> Rnd_NA_NG_pt.
+exact Hxy.
+Qed.
+
+Theorem Rnd_NA_pt_refl :
+ forall F : R -> Prop,
+ forall x : R, F x ->
+ Rnd_NA_pt F x x.
+Proof.
+intros F x Hx.
+split.
+now apply Rnd_N_pt_refl.
+intros f Hxf.
+apply Req_le.
+apply f_equal.
+now apply Rnd_N_pt_idempotent with (1 := Hxf).
+Qed.
+
+Theorem Rnd_NA_pt_idempotent :
+ forall F : R -> Prop,
+ forall x f : R,
+ Rnd_NA_pt F x f -> F x ->
+ f = x.
+Proof.
+intros F x f (Hf,_) Hx.
+now apply Rnd_N_pt_idempotent with F.
+Qed.
+
+Theorem round_pred_ge_0 :
+ forall P : R -> R -> Prop,
+ round_pred_monotone P ->
+ P 0 0 ->
+ forall x f, P x f -> 0 <= x -> 0 <= f.
+Proof.
+intros P HP HP0 x f Hxf Hx.
+now apply (HP 0 x).
+Qed.
+
+Theorem round_pred_gt_0 :
+ forall P : R -> R -> Prop,
+ round_pred_monotone P ->
+ P 0 0 ->
+ forall x f, P x f -> 0 < f -> 0 < x.
+Proof.
+intros P HP HP0 x f Hxf Hf.
+apply Rnot_le_lt.
+intros Hx.
+apply Rlt_not_le with (1 := Hf).
+now apply (HP x 0).
+Qed.
+
+Theorem round_pred_le_0 :
+ forall P : R -> R -> Prop,
+ round_pred_monotone P ->
+ P 0 0 ->
+ forall x f, P x f -> x <= 0 -> f <= 0.
+Proof.
+intros P HP HP0 x f Hxf Hx.
+now apply (HP x 0).
+Qed.
+
+Theorem round_pred_lt_0 :
+ forall P : R -> R -> Prop,
+ round_pred_monotone P ->
+ P 0 0 ->
+ forall x f, P x f -> f < 0 -> x < 0.
+Proof.
+intros P HP HP0 x f Hxf Hf.
+apply Rnot_le_lt.
+intros Hx.
+apply Rlt_not_le with (1 := Hf).
+now apply (HP 0 x).
+Qed.
+
+Theorem Rnd_DN_pt_equiv_format :
+ forall F1 F2 : R -> Prop,
+ forall a b : R,
+ F1 a ->
+ ( forall x, a <= x <= b -> (F1 x <-> F2 x) ) ->
+ forall x f, a <= x <= b -> Rnd_DN_pt F1 x f -> Rnd_DN_pt F2 x f.
+Proof.
+intros F1 F2 a b Ha HF x f Hx (H1, (H2, H3)).
+split.
+apply -> HF.
+exact H1.
+split.
+now apply H3.
+now apply Rle_trans with (1 := H2).
+split.
+exact H2.
+intros k Hk Hl.
+destruct (Rlt_or_le k a) as [H|H].
+apply Rlt_le.
+apply Rlt_le_trans with (1 := H).
+now apply H3.
+apply H3.
+apply <- HF.
+exact Hk.
+split.
+exact H.
+now apply Rle_trans with (1 := Hl).
+exact Hl.
+Qed.
+
+Theorem Rnd_UP_pt_equiv_format :
+ forall F1 F2 : R -> Prop,
+ forall a b : R,
+ F1 b ->
+ ( forall x, a <= x <= b -> (F1 x <-> F2 x) ) ->
+ forall x f, a <= x <= b -> Rnd_UP_pt F1 x f -> Rnd_UP_pt F2 x f.
+Proof.
+intros F1 F2 a b Hb HF x f Hx (H1, (H2, H3)).
+split.
+apply -> HF.
+exact H1.
+split.
+now apply Rle_trans with (2 := H2).
+now apply H3.
+split.
+exact H2.
+intros k Hk Hl.
+destruct (Rle_or_lt k b) as [H|H].
+apply H3.
+apply <- HF.
+exact Hk.
+split.
+now apply Rle_trans with (2 := Hl).
+exact H.
+exact Hl.
+apply Rlt_le.
+apply Rle_lt_trans with (2 := H).
+now apply H3.
+Qed.
+
+(** ensures a real number can always be rounded *)
+Inductive satisfies_any (F : R -> Prop) :=
+ Satisfies_any :
+ F 0 -> ( forall x : R, F x -> F (-x) ) ->
+ round_pred_total (Rnd_DN_pt F) -> satisfies_any F.
+
+Theorem satisfies_any_eq :
+ forall F1 F2 : R -> Prop,
+ ( forall x, F1 x <-> F2 x ) ->
+ satisfies_any F1 ->
+ satisfies_any F2.
+Proof.
+intros F1 F2 Heq (Hzero, Hsym, Hrnd).
+split.
+now apply -> Heq.
+intros x Hx.
+apply -> Heq.
+apply Hsym.
+now apply <- Heq.
+intros x.
+destruct (Hrnd x) as (f, (H1, (H2, H3))).
+exists f.
+split.
+now apply -> Heq.
+split.
+exact H2.
+intros g Hg Hgx.
+apply H3.
+now apply <- Heq.
+exact Hgx.
+Qed.
+
+Theorem satisfies_any_imp_DN :
+ forall F : R -> Prop,
+ satisfies_any F ->
+ round_pred (Rnd_DN_pt F).
+Proof.
+intros F (_,_,Hrnd).
+split.
+apply Hrnd.
+apply Rnd_DN_pt_monotone.
+Qed.
+
+Theorem satisfies_any_imp_UP :
+ forall F : R -> Prop,
+ satisfies_any F ->
+ round_pred (Rnd_UP_pt F).
+Proof.
+intros F Hany.
+split.
+intros x.
+destruct (proj1 (satisfies_any_imp_DN F Hany) (-x)) as (f, Hf).
+exists (-f).
+rewrite <- (Ropp_involutive x).
+apply Rnd_DN_UP_pt_sym.
+apply Hany.
+exact Hf.
+apply Rnd_UP_pt_monotone.
+Qed.
+
+Theorem satisfies_any_imp_ZR :
+ forall F : R -> Prop,
+ satisfies_any F ->
+ round_pred (Rnd_ZR_pt F).
+Proof.
+intros F Hany.
+split.
+intros x.
+destruct (Rle_or_lt 0 x) as [Hx|Hx].
+(* positive *)
+destruct (proj1 (satisfies_any_imp_DN F Hany) x) as (f, Hf).
+exists f.
+split.
+now intros _.
+intros Hx'.
+(* zero *)
+assert (x = 0).
+now apply Rle_antisym.
+rewrite H in Hf |- *.
+clear H Hx Hx'.
+rewrite Rnd_DN_pt_idempotent with (1 := Hf).
+apply Rnd_UP_pt_refl.
+apply Hany.
+apply Hany.
+(* negative *)
+destruct (proj1 (satisfies_any_imp_UP F Hany) x) as (f, Hf).
+exists f.
+split.
+intros Hx'.
+elim (Rlt_irrefl 0).
+now apply Rle_lt_trans with x.
+now intros _.
+(* . *)
+apply Rnd_ZR_pt_monotone.
+apply Hany.
+Qed.
+
+Definition NG_existence_prop (F : R -> Prop) (P : R -> R -> Prop) :=
+ forall x d u, ~F x -> Rnd_DN_pt F x d -> Rnd_UP_pt F x u -> P x u \/ P x d.
+
+Theorem satisfies_any_imp_NG :
+ forall (F : R -> Prop) (P : R -> R -> Prop),
+ satisfies_any F ->
+ NG_existence_prop F P ->
+ round_pred_total (Rnd_NG_pt F P).
+Proof.
+intros F P Hany HP x.
+destruct (proj1 (satisfies_any_imp_DN F Hany) x) as (d, Hd).
+destruct (proj1 (satisfies_any_imp_UP F Hany) x) as (u, Hu).
+destruct (total_order_T (Rabs (u - x)) (Rabs (d - x))) as [[H|H]|H].
+(* |up(x) - x| < |dn(x) - x| *)
+exists u.
+split.
+(* - . *)
+split.
+apply Hu.
+intros g Hg.
+destruct (Rle_or_lt x g) as [Hxg|Hxg].
+rewrite 2!Rabs_pos_eq.
+apply Rplus_le_compat_r.
+now apply Hu.
+now apply Rle_0_minus.
+apply Rle_0_minus.
+apply Hu.
+apply Rlt_le in Hxg.
+apply Rlt_le.
+apply Rlt_le_trans with (1 := H).
+do 2 rewrite <- (Rabs_minus_sym x).
+rewrite 2!Rabs_pos_eq.
+apply Rplus_le_compat_l.
+apply Ropp_le_contravar.
+now apply Hd.
+now apply Rle_0_minus.
+apply Rle_0_minus.
+apply Hd.
+(* - . *)
+right.
+intros f Hf.
+destruct (Rnd_N_pt_DN_or_UP_eq F x _ _ _ Hd Hu Hf) as [K|K] ; rewrite K.
+elim Rlt_not_le with (1 := H).
+rewrite <- K.
+apply Hf.
+apply Hu.
+apply refl_equal.
+(* |up(x) - x| = |dn(x) - x| *)
+destruct (Req_dec x d) as [He|Hne].
+(* - x = d = u *)
+exists x.
+split.
+apply Rnd_N_pt_refl.
+rewrite He.
+apply Hd.
+right.
+intros.
+apply Rnd_N_pt_idempotent with (1 := H0).
+rewrite He.
+apply Hd.
+assert (Hf : ~F x).
+intros Hf.
+apply Hne.
+apply sym_eq.
+now apply Rnd_DN_pt_idempotent with (1 := Hd).
+destruct (HP x _ _ Hf Hd Hu) as [H'|H'].
+(* - u >> d *)
+exists u.
+split.
+split.
+apply Hu.
+intros g Hg.
+destruct (Rle_or_lt x g) as [Hxg|Hxg].
+rewrite 2!Rabs_pos_eq.
+apply Rplus_le_compat_r.
+now apply Hu.
+now apply Rle_0_minus.
+apply Rle_0_minus.
+apply Hu.
+apply Rlt_le in Hxg.
+rewrite H.
+rewrite 2!Rabs_left1.
+apply Ropp_le_contravar.
+apply Rplus_le_compat_r.
+now apply Hd.
+now apply Rle_minus.
+apply Rle_minus.
+apply Hd.
+now left.
+(* - d >> u *)
+exists d.
+split.
+split.
+apply Hd.
+intros g Hg.
+destruct (Rle_or_lt x g) as [Hxg|Hxg].
+rewrite <- H.
+rewrite 2!Rabs_pos_eq.
+apply Rplus_le_compat_r.
+now apply Hu.
+now apply Rle_0_minus.
+apply Rle_0_minus.
+apply Hu.
+apply Rlt_le in Hxg.
+rewrite 2!Rabs_left1.
+apply Ropp_le_contravar.
+apply Rplus_le_compat_r.
+now apply Hd.
+now apply Rle_minus.
+apply Rle_minus.
+apply Hd.
+now left.
+(* |up(x) - x| > |dn(x) - x| *)
+exists d.
+split.
+split.
+apply Hd.
+intros g Hg.
+destruct (Rle_or_lt x g) as [Hxg|Hxg].
+apply Rlt_le.
+apply Rlt_le_trans with (1 := H).
+rewrite 2!Rabs_pos_eq.
+apply Rplus_le_compat_r.
+now apply Hu.
+now apply Rle_0_minus.
+apply Rle_0_minus.
+apply Hu.
+apply Rlt_le in Hxg.
+rewrite 2!Rabs_left1.
+apply Ropp_le_contravar.
+apply Rplus_le_compat_r.
+now apply Hd.
+now apply Rle_minus.
+apply Rle_minus.
+apply Hd.
+right.
+intros f Hf.
+destruct (Rnd_N_pt_DN_or_UP_eq F x _ _ _ Hd Hu Hf) as [K|K] ; rewrite K.
+apply refl_equal.
+elim Rlt_not_le with (1 := H).
+rewrite <- K.
+apply Hf.
+apply Hd.
+Qed.
+
+Theorem satisfies_any_imp_NA :
+ forall F : R -> Prop,
+ satisfies_any F ->
+ round_pred (Rnd_NA_pt F).
+Proof.
+intros F Hany.
+split.
+assert (H : round_pred_total (Rnd_NG_pt F (fun a b => (Rabs a <= Rabs b)%R))).
+apply satisfies_any_imp_NG.
+apply Hany.
+intros x d u Hf Hd Hu.
+destruct (Rle_lt_dec 0 x) as [Hx|Hx].
+left.
+rewrite Rabs_pos_eq with (1 := Hx).
+rewrite Rabs_pos_eq.
+apply Hu.
+apply Rle_trans with (1 := Hx).
+apply Hu.
+right.
+rewrite Rabs_left with (1 := Hx).
+rewrite Rabs_left1.
+apply Ropp_le_contravar.
+apply Hd.
+apply Rlt_le in Hx.
+apply Rle_trans with (2 := Hx).
+apply Hd.
+intros x.
+destruct (H x) as (f, Hf).
+exists f.
+apply <- Rnd_NA_NG_pt.
+apply Hf.
+apply Hany.
+apply Rnd_NA_pt_monotone.
+apply Hany.
+Qed.
+
+End RND_prop.
diff --git a/flocq/Core/Fcore_rnd_ne.v b/flocq/Core/Fcore_rnd_ne.v
new file mode 100644
index 0000000..0b0776e
--- /dev/null
+++ b/flocq/Core/Fcore_rnd_ne.v
@@ -0,0 +1,531 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Rounding to nearest, ties to even: existence, unicity... *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_generic_fmt.
+Require Import Fcore_float_prop.
+Require Import Fcore_ulp.
+
+Notation ZnearestE := (Znearest (fun x => negb (Zeven x))).
+
+Section Fcore_rnd_NE.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Variable fexp : Z -> Z.
+
+Context { valid_exp : Valid_exp fexp }.
+
+Notation format := (generic_format beta fexp).
+Notation canonic := (canonic beta fexp).
+
+Definition NE_prop (_ : R) f :=
+ exists g : float beta, f = F2R g /\ canonic g /\ Zeven (Fnum g) = true.
+
+Definition Rnd_NE_pt :=
+ Rnd_NG_pt format NE_prop.
+
+Definition DN_UP_parity_pos_prop :=
+ forall x xd xu,
+ (0 < x)%R ->
+ ~ format x ->
+ canonic xd ->
+ canonic xu ->
+ F2R xd = round beta fexp Zfloor x ->
+ F2R xu = round beta fexp Zceil x ->
+ Zeven (Fnum xu) = negb (Zeven (Fnum xd)).
+
+Definition DN_UP_parity_prop :=
+ forall x xd xu,
+ ~ format x ->
+ canonic xd ->
+ canonic xu ->
+ F2R xd = round beta fexp Zfloor x ->
+ F2R xu = round beta fexp Zceil x ->
+ Zeven (Fnum xu) = negb (Zeven (Fnum xd)).
+
+Lemma DN_UP_parity_aux :
+ DN_UP_parity_pos_prop ->
+ DN_UP_parity_prop.
+Proof.
+intros Hpos x xd xu Hfx Hd Hu Hxd Hxu.
+destruct (total_order_T 0 x) as [[Hx|Hx]|Hx].
+(* . *)
+exact (Hpos x xd xu Hx Hfx Hd Hu Hxd Hxu).
+elim Hfx.
+rewrite <- Hx.
+apply generic_format_0.
+(* . *)
+assert (Hx': (0 < -x)%R).
+apply Ropp_lt_cancel.
+now rewrite Ropp_involutive, Ropp_0.
+destruct xd as (md, ed).
+destruct xu as (mu, eu).
+simpl.
+rewrite <- (Bool.negb_involutive (Zeven mu)).
+apply f_equal.
+apply sym_eq.
+rewrite <- (Zeven_opp mu), <- (Zeven_opp md).
+change (Zeven (Fnum (Float beta (-md) ed)) = negb (Zeven (Fnum (Float beta (-mu) eu)))).
+apply (Hpos (-x)%R _ _ Hx').
+intros H.
+apply Hfx.
+rewrite <- Ropp_involutive.
+now apply generic_format_opp.
+now apply canonic_opp.
+now apply canonic_opp.
+rewrite round_DN_opp, F2R_Zopp.
+now apply f_equal.
+rewrite round_UP_opp, F2R_Zopp.
+now apply f_equal.
+Qed.
+
+Class Exists_NE :=
+ exists_NE : Zeven beta = false \/ forall e,
+ ((fexp e < e)%Z -> (fexp (e + 1) < e)%Z) /\ ((e <= fexp e)%Z -> fexp (fexp e + 1) = fexp e).
+
+Context { exists_NE_ : Exists_NE }.
+
+Theorem DN_UP_parity_generic_pos :
+ DN_UP_parity_pos_prop.
+Proof with auto with typeclass_instances.
+intros x xd xu H0x Hfx Hd Hu Hxd Hxu.
+destruct (ln_beta beta x) as (ex, Hexa).
+specialize (Hexa (Rgt_not_eq _ _ H0x)).
+generalize Hexa. intros Hex.
+rewrite (Rabs_pos_eq _ (Rlt_le _ _ H0x)) in Hex.
+destruct (Zle_or_lt ex (fexp ex)) as [Hxe|Hxe].
+(* small x *)
+assert (Hd3 : Fnum xd = Z0).
+apply F2R_eq_0_reg with beta (Fexp xd).
+change (F2R xd = R0).
+rewrite Hxd.
+apply round_DN_small_pos with (1 := Hex) (2 := Hxe).
+assert (Hu3 : xu = Float beta (1 * Zpower beta (fexp ex - fexp (fexp ex + 1))) (fexp (fexp ex + 1))).
+apply canonic_unicity with (1 := Hu).
+apply (f_equal fexp).
+rewrite <- F2R_change_exp.
+now rewrite F2R_bpow, ln_beta_bpow.
+now apply valid_exp.
+rewrite <- F2R_change_exp.
+rewrite F2R_bpow.
+apply sym_eq.
+rewrite Hxu.
+apply sym_eq.
+apply round_UP_small_pos with (1 := Hex) (2 := Hxe).
+now apply valid_exp.
+rewrite Hd3, Hu3.
+rewrite Zmult_1_l.
+simpl.
+destruct exists_NE_ as [H|H].
+apply Zeven_Zpower_odd with (2 := H).
+apply Zle_minus_le_0.
+now apply valid_exp.
+rewrite (proj2 (H ex)).
+now rewrite Zminus_diag.
+exact Hxe.
+(* large x *)
+assert (Hd4: (bpow (ex - 1) <= Rabs (F2R xd) < bpow ex)%R).
+rewrite Rabs_pos_eq.
+rewrite Hxd.
+split.
+apply (round_DN_pt beta fexp x).
+apply generic_format_bpow.
+ring_simplify (ex - 1 + 1)%Z.
+omega.
+apply Hex.
+apply Rle_lt_trans with (2 := proj2 Hex).
+apply (round_DN_pt beta fexp x).
+rewrite Hxd.
+apply (round_DN_pt beta fexp x).
+apply generic_format_0.
+now apply Rlt_le.
+assert (Hxe2 : (fexp (ex + 1) <= ex)%Z) by now apply valid_exp.
+assert (Hud: (F2R xu = F2R xd + ulp beta fexp x)%R).
+rewrite Hxu, Hxd.
+now apply ulp_DN_UP.
+destruct (total_order_T (bpow ex) (F2R xu)) as [[Hu2|Hu2]|Hu2].
+(* - xu > bpow ex *)
+elim (Rlt_not_le _ _ Hu2).
+rewrite Hxu.
+apply round_bounded_large_pos...
+(* - xu = bpow ex *)
+assert (Hu3: xu = Float beta (1 * Zpower beta (ex - fexp (ex + 1))) (fexp (ex + 1))).
+apply canonic_unicity with (1 := Hu).
+apply (f_equal fexp).
+rewrite <- F2R_change_exp.
+now rewrite F2R_bpow, ln_beta_bpow.
+now apply valid_exp.
+rewrite <- Hu2.
+apply sym_eq.
+rewrite <- F2R_change_exp.
+apply F2R_bpow.
+exact Hxe2.
+assert (Hd3: xd = Float beta (Zpower beta (ex - fexp ex) - 1) (fexp ex)).
+assert (H: F2R xd = F2R (Float beta (Zpower beta (ex - fexp ex) - 1) (fexp ex))).
+unfold F2R. simpl.
+rewrite Z2R_minus.
+unfold Rminus.
+rewrite Rmult_plus_distr_r.
+rewrite Z2R_Zpower, <- bpow_plus.
+ring_simplify (ex - fexp ex + fexp ex)%Z.
+rewrite Hu2, Hud.
+unfold ulp, canonic_exp.
+rewrite ln_beta_unique with beta x ex.
+unfold F2R.
+simpl. ring.
+rewrite Rabs_pos_eq.
+exact Hex.
+now apply Rlt_le.
+apply Zle_minus_le_0.
+now apply Zlt_le_weak.
+apply canonic_unicity with (1 := Hd) (3 := H).
+apply (f_equal fexp).
+rewrite <- H.
+apply sym_eq.
+now apply ln_beta_unique.
+rewrite Hd3, Hu3.
+unfold Fnum.
+rewrite Zeven_mult. simpl.
+unfold Zminus at 2.
+rewrite Zeven_plus.
+rewrite eqb_sym. simpl.
+fold (negb (Zeven (beta ^ (ex - fexp ex)))).
+rewrite Bool.negb_involutive.
+rewrite (Zeven_Zpower beta (ex - fexp ex)). 2: omega.
+destruct exists_NE_.
+rewrite H.
+apply Zeven_Zpower_odd with (2 := H).
+now apply Zle_minus_le_0.
+apply Zeven_Zpower.
+specialize (H ex).
+omega.
+(* - xu < bpow ex *)
+revert Hud.
+unfold ulp, F2R.
+rewrite Hd, Hu.
+unfold canonic_exp.
+rewrite ln_beta_unique with beta (F2R xu) ex.
+rewrite ln_beta_unique with (1 := Hd4).
+rewrite ln_beta_unique with (1 := Hexa).
+intros H.
+replace (Fnum xu) with (Fnum xd + 1)%Z.
+rewrite Zeven_plus.
+now apply eqb_sym.
+apply sym_eq.
+apply eq_Z2R.
+rewrite Z2R_plus.
+apply Rmult_eq_reg_r with (bpow (fexp ex)).
+rewrite H.
+simpl. ring.
+apply Rgt_not_eq.
+apply bpow_gt_0.
+rewrite Rabs_pos_eq.
+split.
+apply Rle_trans with (1 := proj1 Hex).
+rewrite Hxu.
+apply (round_UP_pt beta fexp x).
+exact Hu2.
+apply Rlt_le.
+apply Rlt_le_trans with (1 := H0x).
+rewrite Hxu.
+apply (round_UP_pt beta fexp x).
+Qed.
+
+Theorem DN_UP_parity_generic :
+ DN_UP_parity_prop.
+Proof.
+apply DN_UP_parity_aux.
+apply DN_UP_parity_generic_pos.
+Qed.
+
+Theorem Rnd_NE_pt_total :
+ round_pred_total Rnd_NE_pt.
+Proof.
+apply satisfies_any_imp_NG.
+now apply generic_format_satisfies_any.
+intros x d u Hf Hd Hu.
+generalize (proj1 Hd).
+unfold generic_format.
+set (ed := canonic_exp beta fexp d).
+set (md := Ztrunc (scaled_mantissa beta fexp d)).
+intros Hd1.
+case_eq (Zeven md) ; [ intros He | intros Ho ].
+right.
+exists (Float beta md ed).
+unfold Fcore_generic_fmt.canonic.
+rewrite <- Hd1.
+now repeat split.
+left.
+generalize (proj1 Hu).
+unfold generic_format.
+set (eu := canonic_exp beta fexp u).
+set (mu := Ztrunc (scaled_mantissa beta fexp u)).
+intros Hu1.
+rewrite Hu1.
+eexists ; repeat split.
+unfold Fcore_generic_fmt.canonic.
+now rewrite <- Hu1.
+rewrite (DN_UP_parity_generic x (Float beta md ed) (Float beta mu eu)).
+simpl.
+now rewrite Ho.
+exact Hf.
+unfold Fcore_generic_fmt.canonic.
+now rewrite <- Hd1.
+unfold Fcore_generic_fmt.canonic.
+now rewrite <- Hu1.
+rewrite <- Hd1.
+apply Rnd_DN_pt_unicity with (1 := Hd).
+now apply round_DN_pt.
+rewrite <- Hu1.
+apply Rnd_UP_pt_unicity with (1 := Hu).
+now apply round_UP_pt.
+Qed.
+
+Theorem Rnd_NE_pt_monotone :
+ round_pred_monotone Rnd_NE_pt.
+Proof.
+apply Rnd_NG_pt_monotone.
+intros x d u Hd Hdn Hu Hun (cd, (Hd1, Hd2)) (cu, (Hu1, Hu2)).
+destruct (Req_dec x d) as [Hx|Hx].
+rewrite <- Hx.
+apply sym_eq.
+apply Rnd_UP_pt_idempotent with (1 := Hu).
+rewrite Hx.
+apply Hd.
+rewrite (DN_UP_parity_aux DN_UP_parity_generic_pos x cd cu) in Hu2 ; try easy.
+now rewrite (proj2 Hd2) in Hu2.
+intros Hf.
+apply Hx.
+apply sym_eq.
+now apply Rnd_DN_pt_idempotent with (1 := Hd).
+rewrite <- Hd1.
+apply Rnd_DN_pt_unicity with (1 := Hd).
+now apply round_DN_pt.
+rewrite <- Hu1.
+apply Rnd_UP_pt_unicity with (1 := Hu).
+now apply round_UP_pt.
+Qed.
+
+Theorem Rnd_NE_pt_round :
+ round_pred Rnd_NE_pt.
+split.
+apply Rnd_NE_pt_total.
+apply Rnd_NE_pt_monotone.
+Qed.
+
+Lemma round_NE_pt_pos :
+ forall x,
+ (0 < x)%R ->
+ Rnd_NE_pt x (round beta fexp ZnearestE x).
+Proof with auto with typeclass_instances.
+intros x Hx.
+split.
+now apply round_N_pt.
+unfold NE_prop.
+set (mx := scaled_mantissa beta fexp x).
+set (xr := round beta fexp ZnearestE x).
+destruct (Req_dec (mx - Z2R (Zfloor mx)) (/2)) as [Hm|Hm].
+(* midpoint *)
+left.
+exists (Float beta (Ztrunc (scaled_mantissa beta fexp xr)) (canonic_exp beta fexp xr)).
+split.
+apply round_N_pt...
+split.
+unfold Fcore_generic_fmt.canonic. simpl.
+apply f_equal.
+apply round_N_pt...
+simpl.
+unfold xr, round, Znearest.
+fold mx.
+rewrite Hm.
+rewrite Rcompare_Eq. 2: apply refl_equal.
+case_eq (Zeven (Zfloor mx)) ; intros Hmx.
+(* . even floor *)
+change (Zeven (Ztrunc (scaled_mantissa beta fexp (round beta fexp Zfloor x))) = true).
+destruct (Rle_or_lt (round beta fexp Zfloor x) 0) as [Hr|Hr].
+rewrite (Rle_antisym _ _ Hr).
+unfold scaled_mantissa.
+rewrite Rmult_0_l.
+change R0 with (Z2R 0).
+now rewrite (Ztrunc_Z2R 0).
+rewrite <- (round_0 beta fexp Zfloor).
+apply round_le...
+now apply Rlt_le.
+rewrite scaled_mantissa_DN...
+now rewrite Ztrunc_Z2R.
+(* . odd floor *)
+change (Zeven (Ztrunc (scaled_mantissa beta fexp (round beta fexp Zceil x))) = true).
+destruct (ln_beta beta x) as (ex, Hex).
+specialize (Hex (Rgt_not_eq _ _ Hx)).
+rewrite (Rabs_pos_eq _ (Rlt_le _ _ Hx)) in Hex.
+destruct (Z_lt_le_dec (fexp ex) ex) as [He|He].
+(* .. large pos *)
+assert (Hu := round_bounded_large_pos _ _ Zceil _ _ He Hex).
+assert (Hfc: Zceil mx = (Zfloor mx + 1)%Z).
+apply Zceil_floor_neq.
+intros H.
+rewrite H in Hm.
+unfold Rminus in Hm.
+rewrite Rplus_opp_r in Hm.
+elim (Rlt_irrefl 0).
+rewrite Hm at 2.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+destruct (proj2 Hu) as [Hu'|Hu'].
+(* ... u <> bpow *)
+unfold scaled_mantissa.
+rewrite canonic_exp_fexp_pos with (1 := conj (proj1 Hu) Hu').
+unfold round, F2R. simpl.
+rewrite canonic_exp_fexp_pos with (1 := Hex).
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+rewrite Ztrunc_Z2R.
+fold mx.
+rewrite Hfc.
+now rewrite Zeven_plus, Hmx.
+(* ... u = bpow *)
+rewrite Hu'.
+unfold scaled_mantissa, canonic_exp.
+rewrite ln_beta_bpow.
+rewrite <- bpow_plus, <- Z2R_Zpower.
+rewrite Ztrunc_Z2R.
+case_eq (Zeven beta) ; intros Hr.
+destruct exists_NE_ as [Hs|Hs].
+now rewrite Hs in Hr.
+destruct (Hs ex) as (H,_).
+rewrite Zeven_Zpower.
+exact Hr.
+omega.
+assert (Zeven (Zfloor mx) = true). 2: now rewrite H in Hmx.
+replace (Zfloor mx) with (Zceil mx + -1)%Z by omega.
+rewrite Zeven_plus.
+apply eqb_true.
+unfold mx.
+replace (Zceil (scaled_mantissa beta fexp x)) with (Zpower beta (ex - fexp ex)).
+rewrite Zeven_Zpower_odd with (2 := Hr).
+easy.
+omega.
+apply eq_Z2R.
+rewrite Z2R_Zpower. 2: omega.
+apply Rmult_eq_reg_r with (bpow (fexp ex)).
+unfold Zminus.
+rewrite bpow_plus.
+rewrite Rmult_assoc, <- bpow_plus, Zplus_opp_l, Rmult_1_r.
+pattern (fexp ex) ; rewrite <- canonic_exp_fexp_pos with (1 := Hex).
+now apply sym_eq.
+apply Rgt_not_eq.
+apply bpow_gt_0.
+generalize (proj1 (valid_exp ex) He).
+omega.
+(* .. small pos *)
+assert (Zeven (Zfloor mx) = true). 2: now rewrite H in Hmx.
+unfold mx, scaled_mantissa.
+rewrite canonic_exp_fexp_pos with (1 := Hex).
+now rewrite mantissa_DN_small_pos.
+(* not midpoint *)
+right.
+intros g Hg.
+destruct (Req_dec x g) as [Hxg|Hxg].
+rewrite <- Hxg.
+apply sym_eq.
+apply round_generic...
+rewrite Hxg.
+apply Hg.
+set (d := round beta fexp Zfloor x).
+set (u := round beta fexp Zceil x).
+apply Rnd_N_pt_unicity with (d := d) (u := u) (4 := Hg).
+now apply round_DN_pt.
+now apply round_UP_pt.
+2: now apply round_N_pt.
+rewrite <- (scaled_mantissa_mult_bpow beta fexp x).
+unfold d, u, round, F2R. simpl. fold mx.
+rewrite <- 2!Rmult_minus_distr_r.
+intros H.
+apply Rmult_eq_reg_r in H.
+apply Hm.
+apply Rcompare_Eq_inv.
+rewrite Rcompare_floor_ceil_mid.
+now apply Rcompare_Eq.
+contradict Hxg.
+apply sym_eq.
+apply Rnd_N_pt_idempotent with (1 := Hg).
+rewrite <- (scaled_mantissa_mult_bpow beta fexp x).
+fold mx.
+rewrite <- Hxg.
+change (Z2R (Zfloor mx) * bpow (canonic_exp beta fexp x))%R with d.
+now eapply round_DN_pt.
+apply Rgt_not_eq.
+apply bpow_gt_0.
+Qed.
+
+Theorem round_NE_opp :
+ forall x,
+ round beta fexp ZnearestE (-x) = (- round beta fexp ZnearestE x)%R.
+Proof.
+intros x.
+unfold round. simpl.
+rewrite scaled_mantissa_opp, canonic_exp_opp.
+rewrite Znearest_opp.
+rewrite <- F2R_Zopp.
+apply (f_equal (fun v => F2R (Float beta (-v) _))).
+set (m := scaled_mantissa beta fexp x).
+unfold Znearest.
+case Rcompare ; trivial.
+apply (f_equal (fun (b : bool) => if b then Zceil m else Zfloor m)).
+rewrite Bool.negb_involutive.
+rewrite Zeven_opp.
+rewrite Zeven_plus.
+now rewrite eqb_sym.
+Qed.
+
+Theorem round_NE_pt :
+ forall x,
+ Rnd_NE_pt x (round beta fexp ZnearestE x).
+Proof with auto with typeclass_instances.
+intros x.
+destruct (total_order_T x 0) as [[Hx|Hx]|Hx].
+apply Rnd_NG_pt_sym.
+apply generic_format_opp.
+unfold NE_prop.
+intros _ f ((mg,eg),(H1,(H2,H3))).
+exists (Float beta (- mg) eg).
+repeat split.
+rewrite H1.
+now rewrite F2R_Zopp.
+now apply canonic_opp.
+simpl.
+now rewrite Zeven_opp.
+rewrite <- round_NE_opp.
+apply round_NE_pt_pos.
+now apply Ropp_0_gt_lt_contravar.
+rewrite Hx, round_0...
+apply Rnd_NG_pt_refl.
+apply generic_format_0.
+now apply round_NE_pt_pos.
+Qed.
+
+End Fcore_rnd_NE.
+
+(** Notations for backward-compatibility with Flocq 1.4. *)
+Notation rndNE := ZnearestE (only parsing).
diff --git a/flocq/Core/Fcore_ulp.v b/flocq/Core/Fcore_ulp.v
new file mode 100644
index 0000000..492fac6
--- /dev/null
+++ b/flocq/Core/Fcore_ulp.v
@@ -0,0 +1,1142 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Unit in the Last Place: our definition using fexp and its properties, successor and predecessor *)
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_rnd.
+Require Import Fcore_generic_fmt.
+Require Import Fcore_float_prop.
+
+Section Fcore_ulp.
+
+Variable beta : radix.
+
+Notation bpow e := (bpow beta e).
+
+Variable fexp : Z -> Z.
+
+Context { valid_exp : Valid_exp fexp }.
+
+Definition ulp x := bpow (canonic_exp beta fexp x).
+
+Notation F := (generic_format beta fexp).
+
+Theorem ulp_opp :
+ forall x, ulp (- x) = ulp x.
+Proof.
+intros x.
+unfold ulp.
+now rewrite canonic_exp_opp.
+Qed.
+
+Theorem ulp_abs :
+ forall x, ulp (Rabs x) = ulp x.
+Proof.
+intros x.
+unfold ulp.
+now rewrite canonic_exp_abs.
+Qed.
+
+Theorem ulp_le_id:
+ forall x,
+ (0 < x)%R ->
+ F x ->
+ (ulp x <= x)%R.
+Proof.
+intros x Zx Fx.
+rewrite <- (Rmult_1_l (ulp x)).
+pattern x at 2; rewrite Fx.
+unfold F2R, ulp; simpl.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+replace 1%R with (Z2R (Zsucc 0)) by reflexivity.
+apply Z2R_le.
+apply Zlt_le_succ.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+now rewrite <- Fx.
+Qed.
+
+Theorem ulp_le_abs:
+ forall x,
+ (x <> 0)%R ->
+ F x ->
+ (ulp x <= Rabs x)%R.
+Proof.
+intros x Zx Fx.
+rewrite <- ulp_abs.
+apply ulp_le_id.
+now apply Rabs_pos_lt.
+now apply generic_format_abs.
+Qed.
+
+Theorem ulp_DN_UP :
+ forall x, ~ F x ->
+ round beta fexp Zceil x = (round beta fexp Zfloor x + ulp x)%R.
+Proof.
+intros x Fx.
+unfold round, ulp. simpl.
+unfold F2R. simpl.
+rewrite Zceil_floor_neq.
+rewrite Z2R_plus. simpl.
+ring.
+intros H.
+apply Fx.
+unfold generic_format, F2R. simpl.
+rewrite <- H.
+rewrite Ztrunc_Z2R.
+rewrite H.
+now rewrite scaled_mantissa_mult_bpow.
+Qed.
+
+(** The successor of x is x + ulp x *)
+Theorem succ_le_bpow :
+ forall x e, (0 < x)%R -> F x ->
+ (x < bpow e)%R ->
+ (x + ulp x <= bpow e)%R.
+Proof.
+intros x e Zx Fx Hx.
+pattern x at 1 ; rewrite Fx.
+unfold ulp, F2R. simpl.
+pattern (bpow (canonic_exp beta fexp x)) at 2 ; rewrite <- Rmult_1_l.
+rewrite <- Rmult_plus_distr_r.
+change 1%R with (Z2R 1).
+rewrite <- Z2R_plus.
+change (F2R (Float beta (Ztrunc (scaled_mantissa beta fexp x) + 1) (canonic_exp beta fexp x)) <= bpow e)%R.
+apply F2R_p1_le_bpow.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+now rewrite <- Fx.
+now rewrite <- Fx.
+Qed.
+
+Theorem ln_beta_succ :
+ forall x, (0 < x)%R -> F x ->
+ forall eps, (0 <= eps < ulp x)%R ->
+ ln_beta beta (x + eps) = ln_beta beta x :> Z.
+Proof.
+intros x Zx Fx eps Heps.
+destruct (ln_beta beta x) as (ex, He).
+simpl.
+specialize (He (Rgt_not_eq _ _ Zx)).
+apply ln_beta_unique.
+rewrite Rabs_pos_eq.
+rewrite Rabs_pos_eq in He.
+split.
+apply Rle_trans with (1 := proj1 He).
+pattern x at 1 ; rewrite <- Rplus_0_r.
+now apply Rplus_le_compat_l.
+apply Rlt_le_trans with (x + ulp x)%R.
+now apply Rplus_lt_compat_l.
+pattern x at 1 ; rewrite Fx.
+unfold ulp, F2R. simpl.
+pattern (bpow (canonic_exp beta fexp x)) at 2 ; rewrite <- Rmult_1_l.
+rewrite <- Rmult_plus_distr_r.
+change 1%R with (Z2R 1).
+rewrite <- Z2R_plus.
+change (F2R (Float beta (Ztrunc (scaled_mantissa beta fexp x) + 1) (canonic_exp beta fexp x)) <= bpow ex)%R.
+apply F2R_p1_le_bpow.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+now rewrite <- Fx.
+now rewrite <- Fx.
+now apply Rlt_le.
+apply Rplus_le_le_0_compat.
+now apply Rlt_le.
+apply Heps.
+Qed.
+
+Theorem round_DN_succ :
+ forall x, (0 < x)%R -> F x ->
+ forall eps, (0 <= eps < ulp x)%R ->
+ round beta fexp Zfloor (x + eps) = x.
+Proof.
+intros x Zx Fx eps Heps.
+pattern x at 2 ; rewrite Fx.
+unfold round.
+unfold scaled_mantissa. simpl.
+unfold canonic_exp at 1 2.
+rewrite ln_beta_succ ; trivial.
+apply (f_equal (fun m => F2R (Float beta m _))).
+rewrite Ztrunc_floor.
+apply Zfloor_imp.
+split.
+apply (Rle_trans _ _ _ (Zfloor_lb _)).
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+pattern x at 1 ; rewrite <- Rplus_0_r.
+now apply Rplus_le_compat_l.
+apply Rlt_le_trans with ((x + ulp x) * bpow (- canonic_exp beta fexp x))%R.
+apply Rmult_lt_compat_r.
+apply bpow_gt_0.
+now apply Rplus_lt_compat_l.
+rewrite Rmult_plus_distr_r.
+rewrite Z2R_plus.
+apply Rplus_le_compat.
+pattern x at 1 3 ; rewrite Fx.
+unfold F2R. simpl.
+rewrite Rmult_assoc.
+rewrite <- bpow_plus.
+rewrite Zplus_opp_r.
+rewrite Rmult_1_r.
+rewrite Zfloor_Z2R.
+apply Rle_refl.
+unfold ulp.
+rewrite <- bpow_plus.
+rewrite Zplus_opp_r.
+apply Rle_refl.
+apply Rmult_le_pos.
+now apply Rlt_le.
+apply bpow_ge_0.
+Qed.
+
+Theorem generic_format_succ :
+ forall x, (0 < x)%R -> F x ->
+ F (x + ulp x).
+Proof.
+intros x Zx Fx.
+destruct (ln_beta beta x) as (ex, Ex).
+specialize (Ex (Rgt_not_eq _ _ Zx)).
+assert (Ex' := Ex).
+rewrite Rabs_pos_eq in Ex'.
+destruct (succ_le_bpow x ex) ; try easy.
+unfold generic_format, scaled_mantissa, canonic_exp.
+rewrite ln_beta_unique with beta (x + ulp x)%R ex.
+pattern x at 1 3 ; rewrite Fx.
+unfold ulp, scaled_mantissa.
+rewrite canonic_exp_fexp with (1 := Ex).
+unfold F2R. simpl.
+rewrite Rmult_plus_distr_r.
+rewrite Rmult_assoc.
+rewrite <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+change (bpow 0) with (Z2R 1).
+rewrite <- Z2R_plus.
+rewrite Ztrunc_Z2R.
+rewrite Z2R_plus.
+rewrite Rmult_plus_distr_r.
+now rewrite Rmult_1_l.
+rewrite Rabs_pos_eq.
+split.
+apply Rle_trans with (1 := proj1 Ex').
+pattern x at 1 ; rewrite <- Rplus_0_r.
+apply Rplus_le_compat_l.
+apply bpow_ge_0.
+exact H.
+apply Rplus_le_le_0_compat.
+now apply Rlt_le.
+apply bpow_ge_0.
+rewrite H.
+apply generic_format_bpow.
+apply valid_exp.
+destruct (Zle_or_lt ex (fexp ex)) ; trivial.
+elim Rlt_not_le with (1 := Zx).
+rewrite Fx.
+replace (Ztrunc (scaled_mantissa beta fexp x)) with Z0.
+rewrite F2R_0.
+apply Rle_refl.
+unfold scaled_mantissa.
+rewrite canonic_exp_fexp with (1 := Ex).
+destruct (mantissa_small_pos beta fexp x ex) ; trivial.
+rewrite Ztrunc_floor.
+apply sym_eq.
+apply Zfloor_imp.
+split.
+now apply Rlt_le.
+exact H2.
+now apply Rlt_le.
+now apply Rlt_le.
+Qed.
+
+Theorem round_UP_succ :
+ forall x, (0 < x)%R -> F x ->
+ forall eps, (0 < eps <= ulp x)%R ->
+ round beta fexp Zceil (x + eps) = (x + ulp x)%R.
+Proof with auto with typeclass_instances.
+intros x Zx Fx eps (Heps1,[Heps2|Heps2]).
+assert (Heps: (0 <= eps < ulp x)%R).
+split.
+now apply Rlt_le.
+exact Heps2.
+assert (Hd := round_DN_succ x Zx Fx eps Heps).
+rewrite ulp_DN_UP.
+rewrite Hd.
+unfold ulp, canonic_exp.
+now rewrite ln_beta_succ.
+intros Fs.
+rewrite round_generic in Hd...
+apply Rgt_not_eq with (2 := Hd).
+pattern x at 2 ; rewrite <- Rplus_0_r.
+now apply Rplus_lt_compat_l.
+rewrite Heps2.
+apply round_generic...
+now apply generic_format_succ.
+Qed.
+
+Theorem succ_le_lt:
+ forall x y,
+ F x -> F y ->
+ (0 < x < y)%R ->
+ (x + ulp x <= y)%R.
+Proof with auto with typeclass_instances.
+intros x y Hx Hy H.
+case (Rle_or_lt (ulp x) (y-x)); intros H1.
+apply Rplus_le_reg_r with (-x)%R.
+now ring_simplify (x+ulp x + -x)%R.
+replace y with (x+(y-x))%R by ring.
+absurd (x < y)%R.
+2: apply H.
+apply Rle_not_lt; apply Req_le.
+rewrite <- round_DN_succ with (eps:=(y-x)%R); try easy.
+ring_simplify (x+(y-x))%R.
+apply sym_eq.
+apply round_generic...
+split; trivial.
+apply Rlt_le; now apply Rlt_Rminus.
+Qed.
+
+(** Error of a rounding, expressed in number of ulps *)
+Theorem ulp_error :
+ forall rnd { Zrnd : Valid_rnd rnd } x,
+ (Rabs (round beta fexp rnd x - x) < ulp x)%R.
+Proof with auto with typeclass_instances.
+intros rnd Zrnd x.
+destruct (generic_format_EM beta fexp x) as [Hx|Hx].
+(* x = rnd x *)
+rewrite round_generic...
+unfold Rminus.
+rewrite Rplus_opp_r, Rabs_R0.
+apply bpow_gt_0.
+(* x <> rnd x *)
+destruct (round_DN_or_UP beta fexp rnd x) as [H|H] ; rewrite H ; clear H.
+(* . *)
+rewrite Rabs_left1.
+rewrite Ropp_minus_distr.
+apply Rplus_lt_reg_r with (round beta fexp Zfloor x).
+rewrite <- ulp_DN_UP with (1 := Hx).
+ring_simplify.
+assert (Hu: (x <= round beta fexp Zceil x)%R).
+apply round_UP_pt...
+destruct Hu as [Hu|Hu].
+exact Hu.
+elim Hx.
+rewrite Hu.
+apply generic_format_round...
+apply Rle_minus.
+apply round_DN_pt...
+(* . *)
+rewrite Rabs_pos_eq.
+rewrite ulp_DN_UP with (1 := Hx).
+apply Rplus_lt_reg_r with (x - ulp x)%R.
+ring_simplify.
+assert (Hd: (round beta fexp Zfloor x <= x)%R).
+apply round_DN_pt...
+destruct Hd as [Hd|Hd].
+exact Hd.
+elim Hx.
+rewrite <- Hd.
+apply generic_format_round...
+apply Rle_0_minus.
+apply round_UP_pt...
+Qed.
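+(* For instance, in radix-2 FLX with precision 53, any real in [1,2) is rounded with an absolute error below ulp 1 = 2^-52, whatever the rounding function. *)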
+
+Theorem ulp_half_error :
+ forall choice x,
+ (Rabs (round beta fexp (Znearest choice) x - x) <= /2 * ulp x)%R.
+Proof with auto with typeclass_instances.
+intros choice x.
+destruct (generic_format_EM beta fexp x) as [Hx|Hx].
+(* x = rnd x *)
+rewrite round_generic...
+unfold Rminus.
+rewrite Rplus_opp_r, Rabs_R0.
+apply Rmult_le_pos.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply bpow_ge_0.
+(* x <> rnd x *)
+set (d := round beta fexp Zfloor x).
+destruct (round_N_pt beta fexp choice x) as (Hr1, Hr2).
+destruct (Rle_or_lt (x - d) (d + ulp x - x)) as [H|H].
+(* . rnd(x) = rndd(x) *)
+apply Rle_trans with (Rabs (d - x)).
+apply Hr2.
+apply (round_DN_pt beta fexp x).
+rewrite Rabs_left1.
+rewrite Ropp_minus_distr.
+apply Rmult_le_reg_r with 2%R.
+now apply (Z2R_lt 0 2).
+apply Rplus_le_reg_r with (d - x)%R.
+ring_simplify.
+apply Rle_trans with (1 := H).
+right. field.
+apply Rle_minus.
+apply (round_DN_pt beta fexp x).
+(* . rnd(x) = rndu(x) *)
+assert (Hu: (d + ulp x)%R = round beta fexp Zceil x).
+unfold d.
+now rewrite <- ulp_DN_UP.
+apply Rle_trans with (Rabs (d + ulp x - x)).
+apply Hr2.
+rewrite Hu.
+apply (round_UP_pt beta fexp x).
+rewrite Rabs_pos_eq.
+apply Rmult_le_reg_r with 2%R.
+now apply (Z2R_lt 0 2).
+apply Rplus_le_reg_r with (- (d + ulp x - x))%R.
+ring_simplify.
+apply Rlt_le.
+apply Rlt_le_trans with (1 := H).
+right. field.
+apply Rle_0_minus.
+rewrite Hu.
+apply (round_UP_pt beta fexp x).
+Qed.
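+(* Rounding to nearest halves the previous bound: on the same example, a real in [1,2) rounds in radix-2 precision-53 FLX with an error of at most 2^-53. *)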
+
+Theorem ulp_le :
+ forall { Hm : Monotone_exp fexp },
+ forall x y: R,
+ (0 < x)%R -> (x <= y)%R ->
+ (ulp x <= ulp y)%R.
+Proof.
+intros Hm x y Hx Hxy.
+apply bpow_le.
+apply Hm.
+now apply ln_beta_le.
+Qed.
+
+Theorem ulp_bpow :
+ forall e, ulp (bpow e) = bpow (fexp (e + 1)).
+Proof.
+intros e.
+unfold ulp.
+apply f_equal.
+apply canonic_exp_fexp.
+rewrite Rabs_pos_eq.
+split.
+ring_simplify (e + 1 - 1)%Z.
+apply Rle_refl.
+apply bpow_lt.
+apply Zlt_succ.
+apply bpow_ge_0.
+Qed.
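+(* With the FLX exponent function fexp e = e - p, this gives ulp (bpow e) = bpow (e + 1 - p): a power of the radix inherits the (larger) ulp of the binade that starts at it. *)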
+
+Theorem ulp_DN :
+ forall x,
+ (0 < round beta fexp Zfloor x)%R ->
+ ulp (round beta fexp Zfloor x) = ulp x.
+Proof.
+intros x Hd.
+unfold ulp.
+now rewrite canonic_exp_DN with (2 := Hd).
+Qed.
+
+Theorem ulp_error_f :
+ forall { Hm : Monotone_exp fexp } rnd { Zrnd : Valid_rnd rnd } x,
+ (round beta fexp rnd x <> 0)%R ->
+ (Rabs (round beta fexp rnd x - x) < ulp (round beta fexp rnd x))%R.
+Proof with auto with typeclass_instances.
+intros Hm rnd Zrnd x Hfx.
+case (round_DN_or_UP beta fexp rnd x); intros Hx.
+(* *)
+case (Rle_or_lt 0 (round beta fexp Zfloor x)).
+intros H; destruct H.
+rewrite Hx at 2.
+rewrite ulp_DN; trivial.
+apply ulp_error...
+rewrite Hx in Hfx; contradict Hfx; auto with real.
+intros H.
+apply Rlt_le_trans with (1:=ulp_error _ _).
+rewrite <- (ulp_opp x), <- (ulp_opp (round beta fexp rnd x)).
+apply ulp_le; trivial.
+apply Ropp_0_gt_lt_contravar; apply Rlt_gt.
+case (Rle_or_lt 0 x); trivial.
+intros H1; contradict H.
+apply Rle_not_lt.
+apply round_ge_generic...
+apply generic_format_0.
+apply Ropp_le_contravar; rewrite Hx.
+apply (round_DN_pt beta fexp x).
+(* *)
+rewrite Hx; case (Rle_or_lt 0 (round beta fexp Zceil x)).
+intros H; destruct H.
+apply Rlt_le_trans with (1:=ulp_error _ _).
+apply ulp_le; trivial.
+case (Rle_or_lt x 0); trivial.
+intros H1; contradict H.
+apply Rle_not_lt.
+apply round_le_generic...
+apply generic_format_0.
+apply round_UP_pt...
+rewrite Hx in Hfx; contradict Hfx; auto with real.
+intros H.
+rewrite <- (ulp_opp (round beta fexp Zceil x)).
+rewrite <- round_DN_opp.
+rewrite ulp_DN; trivial.
+replace (round beta fexp Zceil x - x)%R with (-((round beta fexp Zfloor (-x) - (-x))))%R.
+rewrite Rabs_Ropp.
+apply ulp_error...
+rewrite round_DN_opp; ring.
+rewrite round_DN_opp; apply Ropp_0_gt_lt_contravar; apply Rlt_gt; assumption.
+Qed.
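+(* Same one-ulp bound as ulp_error, but stated with the ulp of the rounded result instead of the ulp of the exact value, which is the quantity actually available after the computation. *)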
+
+Theorem ulp_half_error_f :
+ forall { Hm : Monotone_exp fexp },
+ forall choice x,
+ (round beta fexp (Znearest choice) x <> 0)%R ->
+ (Rabs (round beta fexp (Znearest choice) x - x) <= /2 * ulp (round beta fexp (Znearest choice) x))%R.
+Proof with auto with typeclass_instances.
+intros Hm choice x Hfx.
+case (round_DN_or_UP beta fexp (Znearest choice) x); intros Hx.
+(* *)
+case (Rle_or_lt 0 (round beta fexp Zfloor x)).
+intros H; destruct H.
+rewrite Hx at 2.
+rewrite ulp_DN; trivial.
+apply ulp_half_error.
+rewrite Hx in Hfx; contradict Hfx; auto with real.
+intros H.
+apply Rle_trans with (1:=ulp_half_error _ _).
+apply Rmult_le_compat_l.
+auto with real.
+rewrite <- (ulp_opp x), <- (ulp_opp (round beta fexp (Znearest choice) x)).
+apply ulp_le; trivial.
+apply Ropp_0_gt_lt_contravar; apply Rlt_gt.
+case (Rle_or_lt 0 x); trivial.
+intros H1; contradict H.
+apply Rle_not_lt.
+apply round_ge_generic...
+apply generic_format_0.
+apply Ropp_le_contravar; rewrite Hx.
+apply (round_DN_pt beta fexp x).
+(* *)
+case (Rle_or_lt 0 (round beta fexp Zceil x)).
+intros H; destruct H.
+apply Rle_trans with (1:=ulp_half_error _ _).
+apply Rmult_le_compat_l.
+auto with real.
+apply ulp_le; trivial.
+case (Rle_or_lt x 0); trivial.
+intros H1; contradict H.
+apply Rle_not_lt.
+apply round_le_generic...
+apply generic_format_0.
+rewrite Hx; apply (round_UP_pt beta fexp x).
+rewrite Hx in Hfx; contradict Hfx; auto with real.
+intros H.
+rewrite Hx at 2; rewrite <- (ulp_opp (round beta fexp Zceil x)).
+rewrite <- round_DN_opp.
+rewrite ulp_DN; trivial.
+pattern x at 1 2; rewrite <- Ropp_involutive.
+rewrite round_N_opp.
+unfold Rminus.
+rewrite <- Ropp_plus_distr, Rabs_Ropp.
+apply ulp_half_error.
+rewrite round_DN_opp; apply Ropp_0_gt_lt_contravar; apply Rlt_gt; assumption.
+Qed.
+
+(** Predecessor *)
+Definition pred x :=
+ if Req_bool x (bpow (ln_beta beta x - 1)) then
+ (x - bpow (fexp (ln_beta beta x - 1)))%R
+ else
+ (x - ulp x)%R.
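+(* The special case singles out powers of the radix: for x equal to bpow (ln_beta beta x - 1), the grid spacing just below x is bpow (fexp (ln_beta beta x - 1)), which may be smaller than ulp x, so subtracting ulp x there could skip the true predecessor. *)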
+
+Theorem pred_ge_bpow :
+ forall x e, F x ->
+ x <> ulp x ->
+ (bpow e < x)%R ->
+ (bpow e <= x - ulp x)%R.
+Proof.
+intros x e Fx Hx' Hx.
+(* *)
+assert (1 <= Ztrunc (scaled_mantissa beta fexp x))%Z.
+assert (0 < Ztrunc (scaled_mantissa beta fexp x))%Z.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+rewrite <- Fx.
+apply Rle_lt_trans with (2:=Hx).
+apply bpow_ge_0.
+omega.
+case (Zle_lt_or_eq _ _ H); intros Hm.
+(* *)
+pattern x at 1 ; rewrite Fx.
+unfold ulp, F2R. simpl.
+pattern (bpow (canonic_exp beta fexp x)) at 2 ; rewrite <- Rmult_1_l.
+rewrite <- Rmult_minus_distr_r.
+change 1%R with (Z2R 1).
+rewrite <- Z2R_minus.
+change (bpow e <= F2R (Float beta (Ztrunc (scaled_mantissa beta fexp x) - 1) (canonic_exp beta fexp x)))%R.
+apply bpow_le_F2R_m1; trivial.
+now rewrite <- Fx.
+(* *)
+contradict Hx'.
+pattern x at 1; rewrite Fx.
+rewrite <- Hm.
+unfold ulp, F2R; simpl.
+now rewrite Rmult_1_l.
+Qed.
+
+Lemma generic_format_pred_1:
+ forall x, (0 < x)%R -> F x ->
+ x <> bpow (ln_beta beta x - 1) ->
+ F (x - ulp x).
+Proof.
+intros x Zx Fx Hx.
+destruct (ln_beta beta x) as (ex, Ex).
+simpl in Hx.
+specialize (Ex (Rgt_not_eq _ _ Zx)).
+assert (Ex' : (bpow (ex - 1) < x < bpow ex)%R).
+rewrite Rabs_pos_eq in Ex.
+destruct Ex as (H,H'); destruct H; split; trivial.
+contradict Hx; easy.
+now apply Rlt_le.
+unfold generic_format, scaled_mantissa, canonic_exp.
+rewrite ln_beta_unique with beta (x - ulp x)%R ex.
+pattern x at 1 3 ; rewrite Fx.
+unfold ulp, scaled_mantissa.
+rewrite canonic_exp_fexp with (1 := Ex).
+unfold F2R. simpl.
+rewrite Rmult_minus_distr_r.
+rewrite Rmult_assoc.
+rewrite <- bpow_plus, Zplus_opp_r, Rmult_1_r.
+change (bpow 0) with (Z2R 1).
+rewrite <- Z2R_minus.
+rewrite Ztrunc_Z2R.
+rewrite Z2R_minus.
+rewrite Rmult_minus_distr_r.
+now rewrite Rmult_1_l.
+rewrite Rabs_pos_eq.
+split.
+apply pred_ge_bpow; trivial.
+unfold ulp; intro H.
+assert (ex-1 < canonic_exp beta fexp x < ex)%Z.
+split ; apply (lt_bpow beta) ; rewrite <- H ; easy.
+clear -H0. omega.
+apply Ex'.
+apply Rle_lt_trans with (2 := proj2 Ex').
+pattern x at 3 ; rewrite <- Rplus_0_r.
+apply Rplus_le_compat_l.
+rewrite <-Ropp_0.
+apply Ropp_le_contravar.
+apply bpow_ge_0.
+apply Rle_0_minus.
+pattern x at 2; rewrite Fx.
+unfold ulp, F2R; simpl.
+pattern (bpow (canonic_exp beta fexp x)) at 1; rewrite <- Rmult_1_l.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+replace 1%R with (Z2R 1) by reflexivity.
+apply Z2R_le.
+assert (0 < Ztrunc (scaled_mantissa beta fexp x))%Z.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+rewrite <- Fx.
+apply Rle_lt_trans with (2:=proj1 Ex').
+apply bpow_ge_0.
+omega.
+Qed.
+
+Lemma generic_format_pred_2 :
+ forall x, (0 < x)%R -> F x ->
+ let e := ln_beta_val beta x (ln_beta beta x) in
+ x = bpow (e - 1) ->
+ F (x - bpow (fexp (e - 1))).
+Proof.
+intros x Zx Fx e Hx.
+pose (f:=(x - bpow (fexp (e - 1)))%R).
+fold f.
+assert (He:(fexp (e-1) <= e-1)%Z).
+apply generic_format_bpow_inv with beta; trivial.
+rewrite <- Hx; assumption.
+case (Zle_lt_or_eq _ _ He); clear He; intros He.
+assert (f = F2R (Float beta (Zpower beta (e-1-(fexp (e-1))) -1) (fexp (e-1))))%R.
+unfold f; rewrite Hx.
+unfold F2R; simpl.
+rewrite Z2R_minus, Z2R_Zpower.
+rewrite Rmult_minus_distr_r, Rmult_1_l.
+rewrite <- bpow_plus.
+now replace (e - 1 - fexp (e - 1) + fexp (e - 1))%Z with (e-1)%Z by ring.
+omega.
+rewrite H.
+apply generic_format_F2R.
+intros _.
+apply Zeq_le.
+apply canonic_exp_fexp.
+rewrite <- H.
+unfold f; rewrite Hx.
+rewrite Rabs_right.
+split.
+apply Rplus_le_reg_l with (bpow (fexp (e-1))).
+ring_simplify.
+apply Rle_trans with (bpow (e - 2) + bpow (e - 2))%R.
+apply Rplus_le_compat ; apply bpow_le ; omega.
+apply Rle_trans with (2*bpow (e - 2))%R;[right; ring|idtac].
+apply Rle_trans with (bpow 1*bpow (e - 2))%R.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+replace 2%R with (Z2R 2) by reflexivity.
+replace (bpow 1) with (Z2R beta).
+apply Z2R_le.
+apply <- Zle_is_le_bool.
+now destruct beta.
+simpl.
+unfold Zpower_pos; simpl.
+now rewrite Zmult_1_r.
+rewrite <- bpow_plus.
+replace (1+(e-2))%Z with (e-1)%Z by ring.
+now right.
+rewrite <- Rplus_0_r.
+apply Rplus_lt_compat_l.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+apply bpow_gt_0.
+apply Rle_ge; apply Rle_0_minus.
+apply bpow_le.
+omega.
+replace f with 0%R.
+apply generic_format_0.
+unfold f.
+rewrite Hx, He.
+ring.
+Qed.
+
+Theorem generic_format_pred :
+ forall x, (0 < x)%R -> F x ->
+ F (pred x).
+Proof.
+intros x Zx Fx.
+unfold pred.
+case Req_bool_spec; intros H.
+now apply generic_format_pred_2.
+now apply generic_format_pred_1.
+Qed.
+
+Theorem generic_format_plus_ulp :
+ forall { monotone_exp : Monotone_exp fexp },
+ forall x, (x <> 0)%R ->
+ F x -> F (x + ulp x).
+Proof with auto with typeclass_instances.
+intros monotone_exp x Zx Fx.
+destruct (Rtotal_order x 0) as [Hx|[Hx|Hx]].
+rewrite <- Ropp_involutive.
+apply generic_format_opp.
+rewrite Ropp_plus_distr, <- ulp_opp.
+apply generic_format_opp in Fx.
+destruct (Req_dec (-x) (bpow (ln_beta beta (-x) - 1))) as [Hx'|Hx'].
+rewrite Hx' in Fx |- *.
+apply generic_format_bpow_inv' in Fx...
+unfold ulp, canonic_exp.
+rewrite ln_beta_bpow.
+revert Fx.
+generalize (ln_beta_val _ _ (ln_beta beta (-x)) - 1)%Z.
+clear -monotone_exp valid_exp.
+intros e He.
+destruct (Zle_lt_or_eq _ _ He) as [He1|He1].
+assert (He2 : e = (e - fexp (e + 1) + fexp (e + 1))%Z) by ring.
+rewrite He2 at 1.
+rewrite bpow_plus.
+assert (Hb := Z2R_Zpower beta _ (Zle_minus_le_0 _ _ He)).
+match goal with |- F (?a * ?b + - ?b) =>
+ replace (a * b + -b)%R with ((a - 1) * b)%R by ring end.
+rewrite <- Hb.
+rewrite <- (Z2R_minus _ 1).
+change (F (F2R (Float beta (Zpower beta (e - fexp (e + 1)) - 1) (fexp (e + 1))))).
+apply generic_format_F2R.
+intros Zb.
+unfold canonic_exp.
+rewrite ln_beta_F2R with (1 := Zb).
+rewrite (ln_beta_unique beta _ (e - fexp (e + 1))).
+apply monotone_exp.
+rewrite <- He2.
+apply Zle_succ.
+rewrite Rabs_pos_eq.
+rewrite Z2R_minus, Hb.
+split.
+apply Rplus_le_reg_r with (- bpow (e - fexp (e + 1) - 1) + Z2R 1)%R.
+apply Rmult_le_reg_r with (bpow (-(e - fexp (e+1) - 1))).
+apply bpow_gt_0.
+ring_simplify.
+apply Rle_trans with R1.
+rewrite Rmult_1_l.
+apply (bpow_le _ _ 0).
+clear -He1 ; omega.
+rewrite Ropp_mult_distr_l_reverse.
+rewrite <- 2!bpow_plus.
+ring_simplify (e - fexp (e + 1) - 1 + - (e - fexp (e + 1) - 1))%Z.
+ring_simplify (- (e - fexp (e + 1) - 1) + (e - fexp (e + 1)))%Z.
+rewrite bpow_1.
+rewrite <- (Z2R_plus (-1) _).
+apply (Z2R_le 1).
+generalize (Zle_bool_imp_le _ _ (radix_prop beta)).
+clear ; omega.
+rewrite <- (Rplus_0_r (bpow (e - fexp (e + 1)))) at 2.
+apply Rplus_lt_compat_l.
+now apply (Z2R_lt (-1) 0).
+rewrite Z2R_minus.
+apply Rle_0_minus.
+rewrite Hb.
+apply (bpow_le _ 0).
+now apply Zle_minus_le_0.
+rewrite He1, Rplus_opp_r.
+apply generic_format_0.
+apply generic_format_pred_1 ; try easy.
+rewrite <- Ropp_0.
+now apply Ropp_lt_contravar.
+now elim Zx.
+now apply generic_format_succ.
+Qed.
+
+Theorem generic_format_minus_ulp :
+ forall { monotone_exp : Monotone_exp fexp },
+ forall x, (x <> 0)%R ->
+ F x -> F (x - ulp x).
+Proof.
+intros monotone_exp x Zx Fx.
+replace (x - ulp x)%R with (-(-x + ulp x))%R by ring.
+apply generic_format_opp.
+rewrite <- ulp_opp.
+apply generic_format_plus_ulp.
+contradict Zx.
+rewrite <- (Ropp_involutive x), Zx.
+apply Ropp_0.
+now apply generic_format_opp.
+Qed.
+
+Lemma pred_plus_ulp_1 :
+ forall x, (0 < x)%R -> F x ->
+ x <> bpow (ln_beta beta x - 1) ->
+ ((x - ulp x) + ulp (x-ulp x) = x)%R.
+Proof.
+intros x Zx Fx Hx.
+replace (ulp (x - ulp x)) with (ulp x).
+ring.
+unfold ulp at 1 2; apply f_equal.
+unfold canonic_exp; apply f_equal.
+destruct (ln_beta beta x) as (ex, Hex).
+simpl in *.
+assert (x <> 0)%R by auto with real.
+specialize (Hex H).
+apply sym_eq.
+apply ln_beta_unique.
+rewrite Rabs_right.
+rewrite Rabs_right in Hex.
+2: apply Rle_ge; apply Rlt_le; easy.
+split.
+destruct Hex as ([H1|H1],H2).
+apply pred_ge_bpow; trivial.
+unfold ulp; intros H3.
+assert (ex-1 < canonic_exp beta fexp x < ex)%Z.
+split ; apply (lt_bpow beta) ; rewrite <- H3 ; easy.
+omega.
+contradict Hx; auto with real.
+apply Rle_lt_trans with (2:=proj2 Hex).
+rewrite <- Rplus_0_r.
+apply Rplus_le_compat_l.
+rewrite <- Ropp_0.
+apply Ropp_le_contravar.
+apply bpow_ge_0.
+apply Rle_ge.
+apply Rle_0_minus.
+pattern x at 2; rewrite Fx.
+unfold ulp, F2R; simpl.
+pattern (bpow (canonic_exp beta fexp x)) at 1; rewrite <- Rmult_1_l.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+replace 1%R with (Z2R (Zsucc 0)) by reflexivity.
+apply Z2R_le.
+apply Zlt_le_succ.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+now rewrite <- Fx.
+Qed.
+
+Lemma pred_plus_ulp_2 :
+ forall x, (0 < x)%R -> F x ->
+ let e := ln_beta_val beta x (ln_beta beta x) in
+ x = bpow (e - 1) ->
+ (x - bpow (fexp (e-1)) <> 0)%R ->
+ ((x - bpow (fexp (e-1))) + ulp (x - bpow (fexp (e-1))) = x)%R.
+Proof.
+intros x Zx Fx e Hxe Zp.
+replace (ulp (x - bpow (fexp (e - 1)))) with (bpow (fexp (e - 1))).
+ring.
+assert (He:(fexp (e-1) <= e-1)%Z).
+apply generic_format_bpow_inv with beta; trivial.
+rewrite <- Hxe; assumption.
+case (Zle_lt_or_eq _ _ He); clear He; intros He.
+(* *)
+unfold ulp; apply f_equal.
+unfold canonic_exp; apply f_equal.
+apply sym_eq.
+apply ln_beta_unique.
+rewrite Rabs_right.
+split.
+apply Rplus_le_reg_l with (bpow (fexp (e-1))).
+ring_simplify.
+apply Rle_trans with (bpow (e - 2) + bpow (e - 2))%R.
+apply Rplus_le_compat; apply bpow_le; omega.
+apply Rle_trans with (2*bpow (e - 2))%R;[right; ring|idtac].
+apply Rle_trans with (bpow 1*bpow (e - 2))%R.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+replace 2%R with (Z2R 2) by reflexivity.
+replace (bpow 1) with (Z2R beta).
+apply Z2R_le.
+apply <- Zle_is_le_bool.
+now destruct beta.
+simpl.
+unfold Zpower_pos; simpl.
+now rewrite Zmult_1_r.
+rewrite <- bpow_plus.
+replace (1+(e-2))%Z with (e-1)%Z by ring.
+now right.
+rewrite <- Rplus_0_r, Hxe.
+apply Rplus_lt_compat_l.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+apply bpow_gt_0.
+apply Rle_ge; apply Rle_0_minus.
+rewrite Hxe.
+apply bpow_le.
+omega.
+(* *)
+contradict Zp.
+rewrite Hxe, He; ring.
+Qed.
+
+Theorem pred_plus_ulp :
+ forall x, (0 < x)%R -> F x ->
+ (pred x <> 0)%R ->
+ (pred x + ulp (pred x) = x)%R.
+Proof.
+intros x Zx Fx.
+unfold pred.
+case Req_bool_spec; intros H Zp.
+now apply pred_plus_ulp_2.
+now apply pred_plus_ulp_1.
+Qed.
+
+Theorem pred_lt_id :
+ forall x,
+ (pred x < x)%R.
+Proof.
+intros.
+unfold pred.
+case Req_bool_spec; intros H.
+(* *)
+rewrite <- Rplus_0_r.
+apply Rplus_lt_compat_l.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+apply bpow_gt_0.
+(* *)
+rewrite <- Rplus_0_r.
+apply Rplus_lt_compat_l.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+apply bpow_gt_0.
+Qed.
+
+Theorem pred_ge_0 :
+ forall x,
+ (0 < x)%R -> F x -> (0 <= pred x)%R.
+Proof.
+intros x Zx Fx.
+unfold pred.
+case Req_bool_spec; intros H.
+(* *)
+apply Rle_0_minus.
+rewrite H.
+apply bpow_le.
+destruct (ln_beta beta x) as (ex,Ex) ; simpl.
+rewrite ln_beta_bpow.
+ring_simplify (ex - 1 + 1 - 1)%Z.
+apply generic_format_bpow_inv with beta; trivial.
+simpl in H.
+rewrite <- H; assumption.
+apply Rle_0_minus.
+now apply ulp_le_id.
+Qed.
+
+Theorem round_UP_pred :
+ forall x, (0 < pred x)%R -> F x ->
+ forall eps, (0 < eps <= ulp (pred x) )%R ->
+ round beta fexp Zceil (pred x + eps) = x.
+Proof.
+intros x Hx Fx eps Heps.
+rewrite round_UP_succ; trivial.
+apply pred_plus_ulp; trivial.
+apply Rlt_trans with (1:=Hx).
+apply pred_lt_id.
+now apply Rgt_not_eq.
+apply generic_format_pred; trivial.
+apply Rlt_trans with (1:=Hx).
+apply pred_lt_id.
+Qed.
+
+Theorem round_DN_pred :
+ forall x, (0 < pred x)%R -> F x ->
+ forall eps, (0 < eps <= ulp (pred x))%R ->
+ round beta fexp Zfloor (x - eps) = pred x.
+Proof.
+intros x Hpx Fx eps Heps.
+assert (Hx:(0 < x)%R).
+apply Rlt_trans with (1:=Hpx).
+apply pred_lt_id.
+replace (x-eps)%R with (pred x + (ulp (pred x)-eps))%R.
+2: pattern x at 3; rewrite <- (pred_plus_ulp x); trivial.
+2: ring.
+2: now apply Rgt_not_eq.
+rewrite round_DN_succ; trivial.
+now apply generic_format_pred.
+split.
+apply Rle_0_minus.
+now apply Heps.
+rewrite <- Rplus_0_r.
+apply Rplus_lt_compat_l.
+rewrite <- Ropp_0.
+apply Ropp_lt_contravar.
+now apply Heps.
+Qed.
+
+Lemma le_pred_lt_aux :
+ forall x y,
+ F x -> F y ->
+ (0 < x < y)%R ->
+ (x <= pred y)%R.
+Proof with auto with typeclass_instances.
+intros x y Hx Hy H.
+assert (Zy:(0 < y)%R).
+apply Rlt_trans with (1:=proj1 H).
+apply H.
+(* *)
+assert (Zp: (0 < pred y)%R).
+assert (Zp:(0 <= pred y)%R).
+apply pred_ge_0 ; trivial.
+destruct Zp; trivial.
+generalize H0.
+unfold pred.
+destruct (ln_beta beta y) as (ey,Hey); simpl.
+case Req_bool_spec; intros Hy2.
+(* . *)
+intros Hy3.
+assert (ey-1 = fexp (ey -1))%Z.
+apply bpow_inj with beta.
+rewrite <- Hy2, <- Rplus_0_l, Hy3.
+ring.
+assert (Zx: (x <> 0)%R).
+now apply Rgt_not_eq.
+destruct (ln_beta beta x) as (ex,Hex).
+specialize (Hex Zx).
+assert (ex <= ey)%Z.
+apply bpow_lt_bpow with beta.
+apply Rle_lt_trans with (1:=proj1 Hex).
+apply Rlt_trans with (Rabs y).
+rewrite 2!Rabs_right.
+apply H.
+now apply Rgt_ge.
+now apply Rgt_ge.
+apply Hey.
+now apply Rgt_not_eq.
+case (Zle_lt_or_eq _ _ H2); intros Hexy.
+assert (fexp ex = fexp (ey-1))%Z.
+apply valid_exp.
+omega.
+rewrite <- H1.
+omega.
+absurd (0 < Ztrunc (scaled_mantissa beta fexp x) < 1)%Z.
+omega.
+split.
+apply F2R_gt_0_reg with beta (canonic_exp beta fexp x).
+now rewrite <- Hx.
+apply lt_Z2R.
+apply Rmult_lt_reg_r with (bpow (canonic_exp beta fexp x)).
+apply bpow_gt_0.
+replace (Z2R (Ztrunc (scaled_mantissa beta fexp x)) *
+ bpow (canonic_exp beta fexp x))%R with x.
+rewrite Rmult_1_l.
+unfold canonic_exp.
+rewrite ln_beta_unique with beta x ex.
+rewrite H3,<-H1, <- Hy2.
+apply H.
+exact Hex.
+absurd (y <= x)%R.
+now apply Rlt_not_le.
+rewrite Rabs_right in Hex.
+apply Rle_trans with (2:=proj1 Hex).
+rewrite Hexy, Hy2.
+now apply Rle_refl.
+now apply Rgt_ge.
+(* . *)
+intros Hy3.
+assert (y = bpow (fexp ey))%R.
+apply Rminus_diag_uniq.
+rewrite Hy3.
+unfold ulp, canonic_exp.
+rewrite (ln_beta_unique beta y ey); trivial.
+apply Hey.
+now apply Rgt_not_eq.
+contradict Hy2.
+rewrite H1.
+apply f_equal.
+apply Zplus_reg_l with 1%Z.
+ring_simplify.
+apply trans_eq with (ln_beta beta y).
+apply sym_eq; apply ln_beta_unique.
+rewrite H1, Rabs_right.
+split.
+apply bpow_le.
+omega.
+apply bpow_lt.
+omega.
+apply Rle_ge; apply bpow_ge_0.
+apply ln_beta_unique.
+apply Hey.
+now apply Rgt_not_eq.
+(* *)
+case (Rle_or_lt (ulp (pred y)) (y-x)); intros H1.
+(* . *)
+apply Rplus_le_reg_r with (-x + ulp (pred y))%R.
+ring_simplify (x+(-x+ulp (pred y)))%R.
+apply Rle_trans with (1:=H1).
+rewrite <- (pred_plus_ulp y) at 1; trivial.
+apply Req_le; ring.
+now apply Rgt_not_eq.
+(* . *)
+replace x with (y-(y-x))%R by ring.
+rewrite <- round_DN_pred with (eps:=(y-x)%R); try easy.
+ring_simplify (y-(y-x))%R.
+apply Req_le.
+apply sym_eq.
+apply round_generic...
+split; trivial.
+now apply Rlt_Rminus.
+now apply Rlt_le.
+Qed.
+
+Theorem le_pred_lt :
+ forall x y,
+ F x -> F y ->
+ (0 < y)%R ->
+ (x < y)%R ->
+ (x <= pred y)%R.
+Proof.
+intros x y Fx Fy Hy Hxy.
+destruct (Rle_or_lt x 0) as [Hx|Hx].
+apply Rle_trans with (1 := Hx).
+now apply pred_ge_0.
+apply le_pred_lt_aux ; try easy.
+now split.
+Qed.
+
+End Fcore_ulp.
diff --git a/flocq/Flocq_version.v b/flocq/Flocq_version.v
new file mode 100644
index 0000000..662d83a
--- /dev/null
+++ b/flocq/Flocq_version.v
@@ -0,0 +1,31 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** Helper for detecting the version of Flocq *)
+Require Import BinNat.
+Require Import Ascii String.
+
+Definition Flocq_version := Eval vm_compute in
+ let fix parse s major minor :=
+ match s with
+ | String "."%char t => parse t (major * 100 + minor)%N N0
+ | String h t => parse t major (minor + N_of_ascii h - N_of_ascii "0"%char)%N
+ | Empty_string => (major * 100 + minor)%N
+ end in
+ parse "2.1.0"%string N0 N0.
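+(* The parser packs the dotted version into a single number, two decimal digits per component after the first: "2.1.0" evaluates to 20100%N. *)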
diff --git a/flocq/Prop/Fprop_Sterbenz.v b/flocq/Prop/Fprop_Sterbenz.v
new file mode 100644
index 0000000..7d2c2e7
--- /dev/null
+++ b/flocq/Prop/Fprop_Sterbenz.v
@@ -0,0 +1,169 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Sterbenz conditions for exact subtraction *)
+
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_generic_fmt.
+Require Import Fcalc_ops.
+
+Section Fprop_Sterbenz.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Variable fexp : Z -> Z.
+Context { valid_exp : Valid_exp fexp }.
+Context { monotone_exp : Monotone_exp fexp }.
+Notation format := (generic_format beta fexp).
+
+Theorem generic_format_plus :
+ forall x y,
+ format x -> format y ->
+ (Rabs (x + y) < bpow (Zmin (ln_beta beta x) (ln_beta beta y)))%R ->
+ format (x + y)%R.
+Proof.
+intros x y Fx Fy Hxy.
+destruct (Req_dec (x + y) 0) as [Zxy|Zxy].
+rewrite Zxy.
+apply generic_format_0.
+destruct (Req_dec x R0) as [Zx|Zx].
+now rewrite Zx, Rplus_0_l.
+destruct (Req_dec y R0) as [Zy|Zy].
+now rewrite Zy, Rplus_0_r.
+revert Hxy.
+destruct (ln_beta beta x) as (ex, Ex). simpl.
+specialize (Ex Zx).
+destruct (ln_beta beta y) as (ey, Ey). simpl.
+specialize (Ey Zy).
+intros Hxy.
+set (fx := Float beta (Ztrunc (scaled_mantissa beta fexp x)) (fexp ex)).
+assert (Hx: x = F2R fx).
+rewrite Fx at 1.
+unfold canonic_exp.
+now rewrite ln_beta_unique with (1 := Ex).
+set (fy := Float beta (Ztrunc (scaled_mantissa beta fexp y)) (fexp ey)).
+assert (Hy: y = F2R fy).
+rewrite Fy at 1.
+unfold canonic_exp.
+now rewrite ln_beta_unique with (1 := Ey).
+rewrite Hx, Hy.
+rewrite <- F2R_plus.
+apply generic_format_F2R.
+intros _.
+case_eq (Fplus beta fx fy).
+intros mxy exy Pxy.
+rewrite <- Pxy, F2R_plus, <- Hx, <- Hy.
+unfold canonic_exp.
+replace exy with (fexp (Zmin ex ey)).
+apply monotone_exp.
+now apply ln_beta_le_bpow.
+replace exy with (Fexp (Fplus beta fx fy)) by exact (f_equal Fexp Pxy).
+rewrite Fexp_Fplus.
+simpl. clear -monotone_exp.
+apply sym_eq.
+destruct (Zmin_spec ex ey) as [(H1,H2)|(H1,H2)] ; rewrite H2.
+apply Zmin_l.
+now apply monotone_exp.
+apply Zmin_r.
+apply monotone_exp.
+apply Zlt_le_weak.
+now apply Zgt_lt.
+Qed.
+
+Theorem generic_format_plus_weak :
+ forall x y,
+ format x -> format y ->
+ (Rabs (x + y) <= Rmin (Rabs x) (Rabs y))%R ->
+ format (x + y)%R.
+Proof.
+intros x y Fx Fy Hxy.
+destruct (Req_dec x R0) as [Zx|Zx].
+now rewrite Zx, Rplus_0_l.
+destruct (Req_dec y R0) as [Zy|Zy].
+now rewrite Zy, Rplus_0_r.
+apply generic_format_plus ; try assumption.
+apply Rle_lt_trans with (1 := Hxy).
+unfold Rmin.
+destruct (Rle_dec (Rabs x) (Rabs y)) as [Hxy'|Hxy'].
+rewrite Zmin_l.
+destruct (ln_beta beta x) as (ex, Hx).
+now apply Hx.
+now apply ln_beta_le_abs.
+rewrite Zmin_r.
+destruct (ln_beta beta y) as (ex, Hy).
+now apply Hy.
+apply ln_beta_le_abs.
+exact Zy.
+apply Rlt_le.
+now apply Rnot_le_lt.
+Qed.
+
+Lemma sterbenz_aux :
+ forall x y, format x -> format y ->
+ (y <= x <= 2 * y)%R ->
+ format (x - y)%R.
+Proof.
+intros x y Hx Hy (Hxy1, Hxy2).
+unfold Rminus.
+apply generic_format_plus_weak.
+exact Hx.
+now apply generic_format_opp.
+rewrite Rabs_pos_eq.
+rewrite Rabs_Ropp.
+rewrite Rmin_comm.
+assert (Hy0: (0 <= y)%R).
+apply Rplus_le_reg_r with y.
+apply Rle_trans with x.
+now rewrite Rplus_0_l.
+now rewrite Rmult_plus_distr_r, Rmult_1_l in Hxy2.
+rewrite Rabs_pos_eq with (1 := Hy0).
+rewrite Rabs_pos_eq.
+unfold Rmin.
+destruct (Rle_dec y x) as [Hyx|Hyx].
+apply Rplus_le_reg_r with y.
+now ring_simplify.
+now elim Hyx.
+now apply Rle_trans with y.
+now apply Rle_0_minus.
+Qed.
+
+Theorem sterbenz :
+ forall x y, format x -> format y ->
+ (y / 2 <= x <= 2 * y)%R ->
+ format (x - y)%R.
+Proof.
+intros x y Hx Hy (Hxy1, Hxy2).
+destruct (Rle_or_lt x y) as [Hxy|Hxy].
+rewrite <- Ropp_minus_distr.
+apply generic_format_opp.
+apply sterbenz_aux ; try easy.
+split.
+exact Hxy.
+apply Rcompare_not_Lt_inv.
+rewrite <- Rcompare_half_r.
+now apply Rcompare_not_Lt.
+apply sterbenz_aux ; try easy.
+split.
+now apply Rlt_le.
+exact Hxy2.
+Qed.
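+(* This is Sterbenz's lemma: when x and y are within a factor of two of each other, their difference is representable and is therefore computed exactly; e.g. 1.5 - 1.0 = 0.5 incurs no rounding error in any binary format with at least two digits of precision. *)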
+
+End Fprop_Sterbenz.
diff --git a/flocq/Prop/Fprop_div_sqrt_error.v b/flocq/Prop/Fprop_div_sqrt_error.v
new file mode 100644
index 0000000..84a0694
--- /dev/null
+++ b/flocq/Prop/Fprop_div_sqrt_error.v
@@ -0,0 +1,299 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Remainders of the division and the square root are in the FLX format *)
+Require Import Fcore.
+Require Import Fcalc_ops.
+Require Import Fprop_relative.
+
+Section Fprop_divsqrt_error.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Variable prec : Z.
+
+Theorem generic_format_plus_prec:
+ forall fexp, (forall e, (fexp e <= e - prec)%Z) ->
+ forall x y (fx fy: float beta),
+ (x = F2R fx)%R -> (y = F2R fy)%R -> (Rabs (x+y) < bpow (prec+Fexp fx))%R -> (Rabs (x+y) < bpow (prec+Fexp fy))%R
+ -> generic_format beta fexp (x+y)%R.
+Proof.
+intros fexp Hfexp x y fx fy Hx Hy H1 H2.
+case (Req_dec (x+y) 0); intros H.
+rewrite H; apply generic_format_0.
+rewrite Hx, Hy, <- F2R_plus.
+apply generic_format_F2R.
+intros _.
+case_eq (Fplus beta fx fy).
+intros mz ez Hz.
+rewrite <- Hz.
+apply Zle_trans with (Zmin (Fexp fx) (Fexp fy)).
+rewrite F2R_plus, <- Hx, <- Hy.
+unfold canonic_exp.
+apply Zle_trans with (1:=Hfexp _).
+apply Zplus_le_reg_l with prec; ring_simplify.
+apply ln_beta_le_bpow with (1 := H).
+now apply Zmin_case.
+rewrite <- Fexp_Fplus, Hz.
+apply Zle_refl.
+Qed.
+
+Theorem ex_Fexp_canonic: forall fexp, forall x, generic_format beta fexp x
+ -> exists fx:float beta, (x=F2R fx)%R /\ Fexp fx = canonic_exp beta fexp x.
+Proof.
+intros fexp x; unfold generic_format.
+exists (Float beta (Ztrunc (scaled_mantissa beta fexp x)) (canonic_exp beta fexp x)).
+split; auto.
+Qed.
+
+
+Context { prec_gt_0_ : Prec_gt_0 prec }.
+
+Notation format := (generic_format beta (FLX_exp prec)).
+Notation cexp := (canonic_exp beta (FLX_exp prec)).
+
+Variable choice : Z -> bool.
+
+
+(** Remainder of the division in FLX *)
+Theorem div_error_FLX :
+ forall rnd { Zrnd : Valid_rnd rnd } x y,
+ format x -> format y ->
+ format (x - round beta (FLX_exp prec) rnd (x/y) * y)%R.
+Proof with auto with typeclass_instances.
+intros rnd Zrnd x y Hx Hy.
+destruct (Req_dec y 0) as [Zy|Zy].
+now rewrite Zy, Rmult_0_r, Rminus_0_r.
+destruct (Req_dec (round beta (FLX_exp prec) rnd (x/y)) 0) as [Hr|Hr].
+rewrite Hr; ring_simplify (x-0*y)%R; assumption.
+assert (Zx: x <> R0).
+contradict Hr.
+rewrite Hr.
+unfold Rdiv.
+now rewrite Rmult_0_l, round_0.
+destruct (ex_Fexp_canonic _ x Hx) as (fx,(Hx1,Hx2)).
+destruct (ex_Fexp_canonic _ y Hy) as (fy,(Hy1,Hy2)).
+destruct (ex_Fexp_canonic (FLX_exp prec) (round beta (FLX_exp prec) rnd (x / y))) as (fr,(Hr1,Hr2)).
+apply generic_format_round...
+unfold Rminus; apply generic_format_plus_prec with fx (Fopp beta (Fmult beta fr fy)); trivial.
+intros e; apply Zle_refl.
+now rewrite F2R_opp, F2R_mult, <- Hr1, <- Hy1.
+(* *)
+destruct (relative_error_FLX_ex beta prec (prec_gt_0 prec) rnd (x / y)%R) as (eps,(Heps1,Heps2)).
+apply Rmult_integral_contrapositive_currified.
+exact Zx.
+now apply Rinv_neq_0_compat.
+rewrite Heps2.
+rewrite <- Rabs_Ropp.
+replace (-(x + - (x / y * (1 + eps) * y)))%R with (x * eps)%R by now field.
+rewrite Rabs_mult.
+apply Rlt_le_trans with (Rabs x * 1)%R.
+apply Rmult_lt_compat_l.
+now apply Rabs_pos_lt.
+apply Rlt_le_trans with (1 := Heps1).
+change R1 with (bpow 0).
+apply bpow_le.
+generalize (prec_gt_0 prec).
+clear ; omega.
+rewrite Rmult_1_r.
+rewrite Hx2.
+unfold canonic_exp.
+destruct (ln_beta beta x) as (ex, Hex).
+simpl.
+specialize (Hex Zx).
+apply Rlt_le.
+apply Rlt_le_trans with (1 := proj2 Hex).
+apply bpow_le.
+unfold FLX_exp.
+ring_simplify.
+apply Zle_refl.
+(* *)
+replace (Fexp (Fopp beta (Fmult beta fr fy))) with (Fexp fr + Fexp fy)%Z.
+2: unfold Fopp, Fmult; destruct fr; destruct fy; now simpl.
+replace (x + - (round beta (FLX_exp prec) rnd (x / y) * y))%R with
+ (y * (-(round beta (FLX_exp prec) rnd (x / y) - x/y)))%R.
+2: field; assumption.
+rewrite Rabs_mult.
+apply Rlt_le_trans with (Rabs y * bpow (Fexp fr))%R.
+apply Rmult_lt_compat_l.
+now apply Rabs_pos_lt.
+rewrite Rabs_Ropp.
+replace (bpow (Fexp fr)) with (ulp beta (FLX_exp prec) (F2R fr)).
+rewrite <- Hr1.
+apply ulp_error_f...
+unfold ulp; apply f_equal.
+now rewrite Hr2, <- Hr1.
+replace (prec+(Fexp fr+Fexp fy))%Z with ((prec+Fexp fy)+Fexp fr)%Z by ring.
+rewrite bpow_plus.
+apply Rmult_le_compat_r.
+apply bpow_ge_0.
+rewrite Hy2; unfold canonic_exp, FLX_exp.
+ring_simplify (prec + (ln_beta beta y - prec))%Z.
+destruct (ln_beta beta y); simpl.
+left; now apply a.
+Qed.
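+(* Consequently the division residual x - round(x/y) * y fits in the working precision, so it can be recovered exactly, e.g. with a fused multiply-add. *)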
+
+(** Remainder of the square root in FLX (with p>1) and rounding to nearest *)
+Variable Hp1 : Zlt 1 prec.
+
+Theorem sqrt_error_FLX_N :
+ forall x, format x ->
+ format (x - Rsqr (round beta (FLX_exp prec) (Znearest choice) (sqrt x)))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+destruct (total_order_T x 0) as [[Hxz|Hxz]|Hxz].
+unfold sqrt.
+destruct (Rcase_abs x).
+rewrite round_0...
+unfold Rsqr.
+now rewrite Rmult_0_l, Rminus_0_r.
+elim (Rlt_irrefl 0).
+now apply Rgt_ge_trans with x.
+rewrite Hxz, sqrt_0, round_0...
+unfold Rsqr.
+rewrite Rmult_0_l, Rminus_0_r.
+apply generic_format_0.
+case (Req_dec (round beta (FLX_exp prec) (Znearest choice) (sqrt x)) 0); intros Hr.
+rewrite Hr; unfold Rsqr; ring_simplify (x-0*0)%R; assumption.
+destruct (ex_Fexp_canonic _ x Hx) as (fx,(Hx1,Hx2)).
+destruct (ex_Fexp_canonic (FLX_exp prec) (round beta (FLX_exp prec) (Znearest choice) (sqrt x))) as (fr,(Hr1,Hr2)).
+apply generic_format_round...
+unfold Rminus; apply generic_format_plus_prec with fx (Fopp beta (Fmult beta fr fr)); trivial.
+intros e; apply Zle_refl.
+unfold Rsqr; now rewrite F2R_opp,F2R_mult, <- Hr1.
+(* *)
+apply Rle_lt_trans with x.
+apply Rabs_minus_le.
+apply Rle_0_sqr.
+destruct (relative_error_N_FLX_ex beta prec (prec_gt_0 prec) choice (sqrt x)) as (eps,(Heps1,Heps2)).
+rewrite Heps2.
+rewrite Rsqr_mult, Rsqr_sqrt, Rmult_comm. 2: now apply Rlt_le.
+apply Rmult_le_compat_r.
+now apply Rlt_le.
+apply Rle_trans with (5²/4²)%R.
+rewrite <- Rsqr_div.
+apply Rsqr_le_abs_1.
+apply Rle_trans with (1 := Rabs_triang _ _).
+rewrite Rabs_R1.
+apply Rplus_le_reg_l with (-1)%R.
+rewrite <- Rplus_assoc, Rplus_opp_l, Rplus_0_l.
+apply Rle_trans with (1 := Heps1).
+rewrite Rabs_pos_eq.
+apply Rmult_le_reg_l with 2%R.
+now apply (Z2R_lt 0 2).
+rewrite <- Rmult_assoc, Rinv_r, Rmult_1_l.
+apply Rle_trans with (bpow (-1)).
+apply bpow_le.
+omega.
+replace (2 * (-1 + 5 / 4))%R with (/2)%R by field.
+apply Rinv_le.
+now apply (Z2R_lt 0 2).
+apply (Z2R_le 2).
+unfold Zpower_pos. simpl.
+rewrite Zmult_1_r.
+apply Zle_bool_imp_le.
+apply beta.
+apply Rgt_not_eq.
+now apply (Z2R_lt 0 2).
+unfold Rdiv.
+apply Rmult_le_pos.
+now apply (Z2R_le 0 5).
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 4).
+apply Rgt_not_eq.
+now apply (Z2R_lt 0 4).
+unfold Rsqr.
+replace (5 * 5 / (4 * 4))%R with (25 * /16)%R by field.
+apply Rmult_le_reg_r with 16%R.
+now apply (Z2R_lt 0 16).
+rewrite Rmult_assoc, Rinv_l, Rmult_1_r.
+now apply (Z2R_le 25 32).
+apply Rgt_not_eq.
+now apply (Z2R_lt 0 16).
+rewrite Hx2; unfold canonic_exp, FLX_exp.
+ring_simplify (prec + (ln_beta beta x - prec))%Z.
+destruct (ln_beta beta x); simpl.
+rewrite <- (Rabs_right x).
+apply a.
+now apply Rgt_not_eq.
+now apply Rgt_ge.
+(* *)
+replace (Fexp (Fopp beta (Fmult beta fr fr))) with (Fexp fr + Fexp fr)%Z.
+2: unfold Fopp, Fmult; destruct fr; now simpl.
+rewrite Hr1.
+replace (x + - Rsqr (F2R fr))%R with (-((F2R fr - sqrt x)*(F2R fr + sqrt x)))%R.
+2: rewrite <- (sqrt_sqrt x) at 3; auto.
+2: unfold Rsqr; ring.
+rewrite Rabs_Ropp, Rabs_mult.
+apply Rle_lt_trans with ((/2*bpow (Fexp fr))* Rabs (F2R fr + sqrt x))%R.
+apply Rmult_le_compat_r.
+apply Rabs_pos.
+apply Rle_trans with (/2*ulp beta (FLX_exp prec) (F2R fr))%R.
+rewrite <- Hr1.
+apply ulp_half_error_f...
+right; unfold ulp; apply f_equal.
+rewrite Hr2, <- Hr1; trivial.
+rewrite Rmult_assoc, Rmult_comm.
+replace (prec+(Fexp fr+Fexp fr))%Z with (Fexp fr + (prec+Fexp fr))%Z by ring.
+rewrite bpow_plus, Rmult_assoc.
+apply Rmult_lt_compat_l.
+apply bpow_gt_0.
+apply Rmult_lt_reg_l with 2%R.
+auto with real.
+apply Rle_lt_trans with (Rabs (F2R fr + sqrt x)).
+right; field.
+apply Rle_lt_trans with (1:=Rabs_triang _ _).
+(* . *)
+assert (Rabs (F2R fr) < bpow (prec + Fexp fr))%R.
+rewrite Hr2; unfold canonic_exp; rewrite Hr1.
+unfold FLX_exp.
+ring_simplify (prec + (ln_beta beta (F2R fr) - prec))%Z.
+destruct (ln_beta beta (F2R fr)); simpl.
+apply a.
+rewrite <- Hr1; auto.
+(* . *)
+apply Rlt_le_trans with (bpow (prec + Fexp fr)+ Rabs (sqrt x))%R.
+now apply Rplus_lt_compat_r.
+(* . *)
+rewrite Rmult_plus_distr_r, Rmult_1_l.
+apply Rplus_le_compat_l.
+assert (sqrt x <> 0)%R.
+apply Rgt_not_eq.
+now apply sqrt_lt_R0.
+destruct (ln_beta beta (sqrt x)) as (es,Es).
+specialize (Es H0).
+apply Rle_trans with (bpow es).
+now apply Rlt_le.
+apply bpow_le.
+case (Zle_or_lt es (prec + Fexp fr)) ; trivial.
+intros H1.
+absurd (Rabs (F2R fr) < bpow (es - 1))%R.
+apply Rle_not_lt.
+rewrite <- Hr1.
+apply abs_round_ge_generic...
+apply generic_format_bpow.
+unfold FLX_exp; omega.
+apply Es.
+apply Rlt_le_trans with (1:=H).
+apply bpow_le.
+omega.
+now apply Rlt_le.
+Qed.
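+(* Likewise the square-root residual x - s * s, with s the rounded square root, is representable and can thus be obtained exactly, e.g. with a fused multiply-add. *)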
+
+End Fprop_divsqrt_error.
diff --git a/flocq/Prop/Fprop_mult_error.v b/flocq/Prop/Fprop_mult_error.v
new file mode 100644
index 0000000..e3e094c
--- /dev/null
+++ b/flocq/Prop/Fprop_mult_error.v
@@ -0,0 +1,237 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Error of the multiplication is representable in the FLX/FLT formats *)
+Require Import Fcore.
+Require Import Fcalc_ops.
+
+Section Fprop_mult_error.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Variable prec : Z.
+Context { prec_gt_0_ : Prec_gt_0 prec }.
+
+Notation format := (generic_format beta (FLX_exp prec)).
+Notation cexp := (canonic_exp beta (FLX_exp prec)).
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+(** Auxiliary result that provides the exponent *)
+Lemma mult_error_FLX_aux:
+ forall x y,
+ format x -> format y ->
+ (round beta (FLX_exp prec) rnd (x * y) - (x * y) <> 0)%R ->
+ exists f:float beta,
+ (F2R f = round beta (FLX_exp prec) rnd (x * y) - (x * y))%R
+ /\ (canonic_exp beta (FLX_exp prec) (F2R f) <= Fexp f)%Z
+ /\ (Fexp f = cexp x + cexp y)%Z.
+Proof with auto with typeclass_instances.
+intros x y Hx Hy Hz.
+set (f := (round beta (FLX_exp prec) rnd (x * y))).
+destruct (Req_dec (x * y) 0) as [Hxy0|Hxy0].
+contradict Hz.
+rewrite Hxy0.
+rewrite round_0...
+ring.
+destruct (ln_beta beta (x * y)) as (exy, Hexy).
+specialize (Hexy Hxy0).
+destruct (ln_beta beta (f - x * y)) as (er, Her).
+specialize (Her Hz).
+destruct (ln_beta beta x) as (ex, Hex).
+assert (Hx0: (x <> 0)%R).
+contradict Hxy0.
+now rewrite Hxy0, Rmult_0_l.
+specialize (Hex Hx0).
+destruct (ln_beta beta y) as (ey, Hey).
+assert (Hy0: (y <> 0)%R).
+contradict Hxy0.
+now rewrite Hxy0, Rmult_0_r.
+specialize (Hey Hy0).
+(* *)
+assert (Hc1: (cexp (x * y)%R - prec <= cexp x + cexp y)%Z).
+unfold canonic_exp, FLX_exp.
+rewrite ln_beta_unique with (1 := Hex).
+rewrite ln_beta_unique with (1 := Hey).
+rewrite ln_beta_unique with (1 := Hexy).
+cut (exy - 1 < ex + ey)%Z. omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (1 := proj1 Hexy).
+rewrite Rabs_mult.
+rewrite bpow_plus.
+apply Rmult_le_0_lt_compat.
+apply Rabs_pos.
+apply Rabs_pos.
+apply Hex.
+apply Hey.
+(* *)
+assert (Hc2: (cexp x + cexp y <= cexp (x * y)%R)%Z).
+unfold canonic_exp, FLX_exp.
+rewrite ln_beta_unique with (1 := Hex).
+rewrite ln_beta_unique with (1 := Hey).
+rewrite ln_beta_unique with (1 := Hexy).
+cut ((ex - 1) + (ey - 1) < exy)%Z.
+generalize (prec_gt_0 prec).
+clear ; omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2 := proj2 Hexy).
+rewrite Rabs_mult.
+rewrite bpow_plus.
+apply Rmult_le_compat.
+apply bpow_ge_0.
+apply bpow_ge_0.
+apply Hex.
+apply Hey.
+(* *)
+assert (Hr: ((F2R (Float beta (- (Ztrunc (scaled_mantissa beta (FLX_exp prec) x) *
+ Ztrunc (scaled_mantissa beta (FLX_exp prec) y)) + rnd (scaled_mantissa beta (FLX_exp prec) (x * y)) *
+ beta ^ (cexp (x * y)%R - (cexp x + cexp y))) (cexp x + cexp y))) = f - x * y)%R).
+rewrite Hx at 6.
+rewrite Hy at 6.
+rewrite <- F2R_mult.
+simpl.
+unfold f, round, Rminus.
+rewrite <- F2R_opp, Rplus_comm, <- F2R_plus.
+unfold Fplus. simpl.
+now rewrite Zle_imp_le_bool with (1 := Hc2).
+(* *)
+exists (Float beta (- (Ztrunc (scaled_mantissa beta (FLX_exp prec) x) *
+ Ztrunc (scaled_mantissa beta (FLX_exp prec) y)) + rnd (scaled_mantissa beta (FLX_exp prec) (x * y)) *
+ beta ^ (cexp (x * y)%R - (cexp x + cexp y))) (cexp x + cexp y)).
+split;[assumption|split].
+rewrite Hr.
+simpl.
+clear Hr.
+apply Zle_trans with (cexp (x * y)%R - prec)%Z.
+unfold canonic_exp, FLX_exp.
+apply Zplus_le_compat_r.
+rewrite ln_beta_unique with (1 := Hexy).
+apply ln_beta_le_bpow with (1 := Hz).
+replace (bpow (exy - prec)) with (ulp beta (FLX_exp prec) (x * y)).
+apply ulp_error...
+unfold ulp, canonic_exp.
+now rewrite ln_beta_unique with (1 := Hexy).
+apply Hc1.
+reflexivity.
+Qed.
+
+(** Error of the multiplication in FLX *)
+Theorem mult_error_FLX :
+ forall x y,
+ format x -> format y ->
+ format (round beta (FLX_exp prec) rnd (x * y) - (x * y))%R.
+Proof.
+intros x y Hx Hy.
+destruct (Req_dec (round beta (FLX_exp prec) rnd (x * y) - x * y) 0) as [Hr0|Hr0].
+rewrite Hr0.
+apply generic_format_0.
+destruct (mult_error_FLX_aux x y Hx Hy Hr0) as ((m,e),(H1,(H2,H3))).
+rewrite <- H1.
+now apply generic_format_F2R.
+Qed.
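+(* This representability result is what makes error-free product transformations possible: with a fused multiply-add, fma(x, y, -round(x*y)) evaluates the rounding error of the product exactly. *)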
+
+End Fprop_mult_error.
+
+Section Fprop_mult_error_FLT.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Variable emin prec : Z.
+Context { prec_gt_0_ : Prec_gt_0 prec }.
+Variable Hpemin: (emin <= prec)%Z.
+
+Notation format := (generic_format beta (FLT_exp emin prec)).
+Notation cexp := (canonic_exp beta (FLT_exp emin prec)).
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+(** Error of the multiplication in FLT with underflow requirements *)
+Theorem mult_error_FLT :
+ forall x y,
+ format x -> format y ->
+ (x*y = 0)%R \/ (bpow (emin + 2*prec - 1) <= Rabs (x * y))%R ->
+ format (round beta (FLT_exp emin prec) rnd (x * y) - (x * y))%R.
+Proof with auto with typeclass_instances.
+clear Hpemin.
+intros x y Hx Hy Hxy.
+set (f := (round beta (FLT_exp emin prec) rnd (x * y))).
+destruct (Req_dec (f - x * y) 0) as [Hr0|Hr0].
+rewrite Hr0.
+apply generic_format_0.
+destruct Hxy as [Hxy|Hxy].
+unfold f.
+rewrite Hxy.
+rewrite round_0...
+ring_simplify (0 - 0)%R.
+apply generic_format_0.
+destruct (mult_error_FLX_aux beta prec rnd x y) as ((m,e),(H1,(H2,H3))).
+now apply generic_format_FLX_FLT with emin.
+now apply generic_format_FLX_FLT with emin.
+rewrite <- (round_FLT_FLX beta emin).
+assumption.
+apply Rle_trans with (2:=Hxy).
+apply bpow_le.
+generalize (prec_gt_0 prec).
+clear ; omega.
+rewrite <- (round_FLT_FLX beta emin) in H1.
+2:apply Rle_trans with (2:=Hxy).
+2:apply bpow_le ; generalize (prec_gt_0 prec) ; clear ; omega.
+unfold f; rewrite <- H1.
+apply generic_format_F2R.
+intros _.
+simpl in H2, H3.
+unfold canonic_exp, FLT_exp.
+case (Zmax_spec (ln_beta beta (F2R (Float beta m e)) - prec) emin);
+ intros (M1,M2); rewrite M2.
+apply Zle_trans with (2:=H2).
+unfold canonic_exp, FLX_exp.
+apply Zle_refl.
+rewrite H3.
+unfold canonic_exp, FLX_exp.
+assert (Hxy0:(x*y <> 0)%R).
+contradict Hr0.
+unfold f.
+rewrite Hr0.
+rewrite round_0...
+ring.
+assert (Hx0: (x <> 0)%R).
+contradict Hxy0.
+now rewrite Hxy0, Rmult_0_l.
+assert (Hy0: (y <> 0)%R).
+contradict Hxy0.
+now rewrite Hxy0, Rmult_0_r.
+destruct (ln_beta beta x) as (ex,Ex) ; simpl.
+specialize (Ex Hx0).
+destruct (ln_beta beta y) as (ey,Ey) ; simpl.
+specialize (Ey Hy0).
+assert (emin + 2 * prec -1 < ex + ey)%Z.
+2: omega.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (1:=Hxy).
+rewrite Rabs_mult, bpow_plus.
+apply Rmult_le_0_lt_compat; try apply Rabs_pos.
+apply Ex.
+apply Ey.
+Qed.
+
+End Fprop_mult_error_FLT.
diff --git a/flocq/Prop/Fprop_plus_error.v b/flocq/Prop/Fprop_plus_error.v
new file mode 100644
index 0000000..d9dee7c
--- /dev/null
+++ b/flocq/Prop/Fprop_plus_error.v
@@ -0,0 +1,234 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Error of the rounded-to-nearest addition is representable. *)
+
+Require Import Fcore_Raux.
+Require Import Fcore_defs.
+Require Import Fcore_float_prop.
+Require Import Fcore_generic_fmt.
+Require Import Fcalc_ops.
+
+Section Fprop_plus_error.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Variable fexp : Z -> Z.
+Context { valid_exp : Valid_exp fexp }.
+
+Section round_repr_same_exp.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem round_repr_same_exp :
+ forall m e,
+ exists m',
+ round beta fexp rnd (F2R (Float beta m e)) = F2R (Float beta m' e).
+Proof with auto with typeclass_instances.
+intros m e.
+set (e' := canonic_exp beta fexp (F2R (Float beta m e))).
+unfold round, scaled_mantissa. fold e'.
+destruct (Zle_or_lt e' e) as [He|He].
+exists m.
+unfold F2R at 2. simpl.
+rewrite Rmult_assoc, <- bpow_plus.
+rewrite <- Z2R_Zpower. 2: omega.
+rewrite <- Z2R_mult, Zrnd_Z2R...
+unfold F2R. simpl.
+rewrite Z2R_mult.
+rewrite Rmult_assoc.
+rewrite Z2R_Zpower. 2: omega.
+rewrite <- bpow_plus.
+apply (f_equal (fun v => Z2R m * bpow v)%R).
+ring.
+exists ((rnd (Z2R m * bpow (e - e'))) * Zpower beta (e' - e))%Z.
+unfold F2R. simpl.
+rewrite Z2R_mult.
+rewrite Z2R_Zpower. 2: omega.
+rewrite 2!Rmult_assoc.
+rewrite <- 2!bpow_plus.
+apply (f_equal (fun v => _ * bpow v)%R).
+ring.
+Qed.
+
+End round_repr_same_exp.
+
+Context { monotone_exp : Monotone_exp fexp }.
+Notation format := (generic_format beta fexp).
+
+Variable choice : Z -> bool.
+
+Lemma plus_error_aux :
+ forall x y,
+ (canonic_exp beta fexp x <= canonic_exp beta fexp y)%Z ->
+ format x -> format y ->
+ format (round beta fexp (Znearest choice) (x + y) - (x + y))%R.
+Proof.
+intros x y.
+set (ex := canonic_exp beta fexp x).
+set (ey := canonic_exp beta fexp y).
+intros He Hx Hy.
+destruct (Req_dec (round beta fexp (Znearest choice) (x + y) - (x + y)) R0) as [H0|H0].
+rewrite H0.
+apply generic_format_0.
+set (mx := Ztrunc (scaled_mantissa beta fexp x)).
+set (my := Ztrunc (scaled_mantissa beta fexp y)).
+(* *)
+assert (Hxy: (x + y)%R = F2R (Float beta (mx + my * beta ^ (ey - ex)) ex)).
+rewrite Hx, Hy.
+fold mx my ex ey.
+rewrite <- F2R_plus.
+unfold Fplus. simpl.
+now rewrite Zle_imp_le_bool with (1 := He).
+(* *)
+rewrite Hxy.
+destruct (round_repr_same_exp (Znearest choice) (mx + my * beta ^ (ey - ex)) ex) as (mxy, Hxy').
+rewrite Hxy'.
+assert (H: (F2R (Float beta mxy ex) - F2R (Float beta (mx + my * beta ^ (ey - ex)) ex))%R =
+ F2R (Float beta (mxy - (mx + my * beta ^ (ey - ex))) ex)).
+now rewrite <- F2R_minus, Fminus_same_exp.
+rewrite H.
+apply generic_format_F2R.
+intros _.
+apply monotone_exp.
+rewrite <- H, <- Hxy', <- Hxy.
+apply ln_beta_le_abs.
+exact H0.
+pattern x at 3 ; replace x with (-(y - (x + y)))%R by ring.
+rewrite Rabs_Ropp.
+now apply (round_N_pt beta _ choice (x + y)).
+Qed.
+
+(** Error of the addition *)
+Theorem plus_error :
+ forall x y,
+ format x -> format y ->
+ format (round beta fexp (Znearest choice) (x + y) - (x + y))%R.
+Proof.
+intros x y Hx Hy.
+destruct (Zle_or_lt (canonic_exp beta fexp x) (canonic_exp beta fexp y)).
+now apply plus_error_aux.
+rewrite Rplus_comm.
+apply plus_error_aux ; try easy.
+now apply Zlt_le_weak.
+Qed.
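+(* This is the property behind error-free transformations such as TwoSum and Fast2Sum: the rounding error of a round-to-nearest addition is itself representable, so it can be computed with a few additional additions. *)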
+
+End Fprop_plus_error.
+
+Section Fprop_plus_zero.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Variable fexp : Z -> Z.
+Context { valid_exp : Valid_exp fexp }.
+Context { exp_not_FTZ : Exp_not_FTZ fexp }.
+Notation format := (generic_format beta fexp).
+
+Section round_plus_eq_zero_aux.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Lemma round_plus_eq_zero_aux :
+ forall x y,
+ (canonic_exp beta fexp x <= canonic_exp beta fexp y)%Z ->
+ format x -> format y ->
+ (0 <= x + y)%R ->
+ round beta fexp rnd (x + y) = R0 ->
+ (x + y = 0)%R.
+Proof with auto with typeclass_instances.
+intros x y He Hx Hy Hp Hxy.
+destruct (Req_dec (x + y) 0) as [H0|H0].
+exact H0.
+destruct (ln_beta beta (x + y)) as (exy, Hexy).
+simpl.
+specialize (Hexy H0).
+destruct (Zle_or_lt exy (fexp exy)) as [He'|He'].
+(* . *)
+assert (H: (x + y)%R = F2R (Float beta (Ztrunc (x * bpow (- fexp exy)) +
+ Ztrunc (y * bpow (- fexp exy))) (fexp exy))).
+rewrite (subnormal_exponent beta fexp exy x He' Hx) at 1.
+rewrite (subnormal_exponent beta fexp exy y He' Hy) at 1.
+now rewrite <- F2R_plus, Fplus_same_exp.
+rewrite H in Hxy.
+rewrite round_generic in Hxy...
+now rewrite <- H in Hxy.
+apply generic_format_F2R.
+intros _.
+rewrite <- H.
+unfold canonic_exp.
+rewrite ln_beta_unique with (1 := Hexy).
+apply Zle_refl.
+(* . *)
+elim Rle_not_lt with (1 := round_le beta _ rnd _ _ (proj1 Hexy)).
+rewrite (Rabs_pos_eq _ Hp).
+rewrite Hxy.
+rewrite round_generic...
+apply bpow_gt_0.
+apply generic_format_bpow.
+apply Zlt_succ_le.
+now rewrite (Zsucc_pred exy) in He'.
+Qed.
+
+End round_plus_eq_zero_aux.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+(** rnd(x+y) = 0 -> x+y = 0, provided this is not an FTZ format *)
+Theorem round_plus_eq_zero :
+ forall x y,
+ format x -> format y ->
+ round beta fexp rnd (x + y) = R0 ->
+ (x + y = 0)%R.
+Proof with auto with typeclass_instances.
+intros x y Hx Hy.
+destruct (Rle_or_lt R0 (x + y)) as [H1|H1].
+(* . *)
+revert H1.
+destruct (Zle_or_lt (canonic_exp beta fexp x) (canonic_exp beta fexp y)) as [H2|H2].
+now apply round_plus_eq_zero_aux.
+rewrite Rplus_comm.
+apply round_plus_eq_zero_aux ; try easy.
+now apply Zlt_le_weak.
+(* . *)
+revert H1.
+rewrite <- (Ropp_involutive (x + y)), Ropp_plus_distr, <- Ropp_0.
+intros H1.
+rewrite round_opp.
+intros Hxy.
+apply f_equal.
+cut (round beta fexp (Zrnd_opp rnd) (- x + - y) = 0)%R.
+cut (0 <= -x + -y)%R.
+destruct (Zle_or_lt (canonic_exp beta fexp (-x)) (canonic_exp beta fexp (-y))) as [H2|H2].
+apply round_plus_eq_zero_aux ; try apply generic_format_opp...
+rewrite Rplus_comm.
+apply round_plus_eq_zero_aux ; try apply generic_format_opp...
+now apply Zlt_le_weak.
+apply Rlt_le.
+now apply Ropp_lt_cancel.
+rewrite <- (Ropp_involutive (round _ _ _ _)).
+rewrite Hxy.
+apply Ropp_involutive.
+Qed.
+
+End Fprop_plus_zero.
diff --git a/flocq/Prop/Fprop_relative.v b/flocq/Prop/Fprop_relative.v
new file mode 100644
index 0000000..8df7336
--- /dev/null
+++ b/flocq/Prop/Fprop_relative.v
@@ -0,0 +1,699 @@
+(**
+This file is part of the Flocq formalization of floating-point
+arithmetic in Coq: http://flocq.gforge.inria.fr/
+
+Copyright (C) 2010-2011 Sylvie Boldo
+#<br />#
+Copyright (C) 2010-2011 Guillaume Melquiond
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 3 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+COPYING file for more details.
+*)
+
+(** * Relative error of the rounding operations *)
+Require Import Fcore.
+
+Section Fprop_relative.
+
+Variable beta : radix.
+Notation bpow e := (bpow beta e).
+
+Section Fprop_relative_generic.
+
+Variable fexp : Z -> Z.
+Context { prop_exp : Valid_exp fexp }.
+
+Section relative_error_conversion.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Lemma relative_error_lt_conversion :
+ forall x b, (0 < b)%R ->
+ (Rabs (round beta fexp rnd x - x) < b * Rabs x)%R ->
+ exists eps,
+ (Rabs eps < b)%R /\ round beta fexp rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x b Hb0 Hxb.
+destruct (Req_dec x 0) as [Hx0|Hx0].
+(* *)
+exists R0.
+split.
+now rewrite Rabs_R0.
+rewrite Hx0, Rmult_0_l.
+apply round_0...
+(* *)
+exists ((round beta fexp rnd x - x) / x)%R.
+split. 2: now field.
+unfold Rdiv.
+rewrite Rabs_mult.
+apply Rmult_lt_reg_r with (Rabs x).
+now apply Rabs_pos_lt.
+rewrite Rmult_assoc, <- Rabs_mult.
+rewrite Rinv_l with (1 := Hx0).
+now rewrite Rabs_R1, Rmult_1_r.
+Qed.
+
+Lemma relative_error_le_conversion :
+ forall x b, (0 <= b)%R ->
+ (Rabs (round beta fexp rnd x - x) <= b * Rabs x)%R ->
+ exists eps,
+ (Rabs eps <= b)%R /\ round beta fexp rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x b Hb0 Hxb.
+destruct (Req_dec x 0) as [Hx0|Hx0].
+(* *)
+exists R0.
+split.
+now rewrite Rabs_R0.
+rewrite Hx0, Rmult_0_l.
+apply round_0...
+(* *)
+exists ((round beta fexp rnd x - x) / x)%R.
+split. 2: now field.
+unfold Rdiv.
+rewrite Rabs_mult.
+apply Rmult_le_reg_r with (Rabs x).
+now apply Rabs_pos_lt.
+rewrite Rmult_assoc, <- Rabs_mult.
+rewrite Rinv_l with (1 := Hx0).
+now rewrite Rabs_R1, Rmult_1_r.
+Qed.
+
+End relative_error_conversion.
+
+Variable emin p : Z.
+Hypothesis Hmin : forall k, (emin < k)%Z -> (p <= k - fexp k)%Z.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem relative_error :
+ forall x,
+ (bpow emin <= Rabs x)%R ->
+ (Rabs (round beta fexp rnd x - x) < bpow (-p + 1) * Rabs x)%R.
+Proof.
+intros x Hx.
+apply Rlt_le_trans with (ulp beta fexp x)%R.
+now apply ulp_error.
+unfold ulp, canonic_exp.
+assert (Hx': (x <> 0)%R).
+intros H.
+apply Rlt_not_le with (2 := Hx).
+rewrite H, Rabs_R0.
+apply bpow_gt_0.
+destruct (ln_beta beta x) as (ex, He).
+simpl.
+specialize (He Hx').
+apply Rle_trans with (bpow (-p + 1) * bpow (ex - 1))%R.
+rewrite <- bpow_plus.
+apply bpow_le.
+assert (emin < ex)%Z.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2 := proj2 He).
+exact Hx.
+generalize (Hmin ex).
+omega.
+apply Rmult_le_compat_l.
+apply bpow_ge_0.
+apply He.
+Qed.
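+(* Away from the underflow threshold, the relative error of any rounding is thus below beta^(1-p); for binary64 this is the familiar 2^-52 bound. *)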
+
+(** 1+#&epsilon;# property in any rounding *)
+Theorem relative_error_ex :
+ forall x,
+ (bpow emin <= Rabs x)%R ->
+ exists eps,
+ (Rabs eps < bpow (-p + 1))%R /\ round beta fexp rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_lt_conversion...
+apply bpow_gt_0.
+now apply relative_error.
+Qed.
+
+Theorem relative_error_F2R_emin :
+ forall m, let x := F2R (Float beta m emin) in
+ (x <> 0)%R ->
+ (Rabs (round beta fexp rnd x - x) < bpow (-p + 1) * Rabs x)%R.
+Proof.
+intros m x Hx.
+apply relative_error.
+unfold x.
+rewrite <- F2R_Zabs.
+apply bpow_le_F2R.
+apply F2R_lt_reg with beta emin.
+rewrite F2R_0, F2R_Zabs.
+now apply Rabs_pos_lt.
+Qed.
+
+Theorem relative_error_F2R_emin_ex :
+ forall m, let x := F2R (Float beta m emin) in
+ (x <> 0)%R ->
+ exists eps,
+ (Rabs eps < bpow (-p + 1))%R /\ round beta fexp rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros m x Hx.
+apply relative_error_lt_conversion...
+apply bpow_gt_0.
+now apply relative_error_F2R_emin.
+Qed.
+
+Theorem relative_error_round :
+ (0 < p)%Z ->
+ forall x,
+ (bpow emin <= Rabs x)%R ->
+ (Rabs (round beta fexp rnd x - x) < bpow (-p + 1) * Rabs (round beta fexp rnd x))%R.
+Proof with auto with typeclass_instances.
+intros Hp x Hx.
+apply Rlt_le_trans with (ulp beta fexp x)%R.
+now apply ulp_error.
+assert (Hx': (x <> 0)%R).
+intros H.
+apply Rlt_not_le with (2 := Hx).
+rewrite H, Rabs_R0.
+apply bpow_gt_0.
+unfold ulp, canonic_exp.
+destruct (ln_beta beta x) as (ex, He).
+simpl.
+specialize (He Hx').
+assert (He': (emin < ex)%Z).
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2 := proj2 He).
+exact Hx.
+apply Rle_trans with (bpow (-p + 1) * bpow (ex - 1))%R.
+rewrite <- bpow_plus.
+apply bpow_le.
+generalize (Hmin ex).
+omega.
+apply Rmult_le_compat_l.
+apply bpow_ge_0.
+generalize He.
+apply round_abs_abs...
+clear rnd valid_rnd x Hx Hx' He.
+intros rnd valid_rnd x Hx.
+rewrite <- (round_generic beta fexp rnd (bpow (ex - 1))).
+now apply round_le.
+apply generic_format_bpow.
+ring_simplify (ex - 1 + 1)%Z.
+generalize (Hmin ex).
+omega.
+Qed.
+
+Theorem relative_error_round_F2R_emin :
+ (0 < p)%Z ->
+ forall m, let x := F2R (Float beta m emin) in
+ (x <> 0)%R ->
+ (Rabs (round beta fexp rnd x - x) < bpow (-p + 1) * Rabs (round beta fexp rnd x))%R.
+Proof.
+intros Hp m x Hx.
+apply relative_error_round.
+exact Hp.
+unfold x.
+rewrite <- F2R_Zabs.
+apply bpow_le_F2R.
+apply F2R_lt_reg with beta emin.
+rewrite F2R_0, F2R_Zabs.
+now apply Rabs_pos_lt.
+Qed.
+
+Variable choice : Z -> bool.
+
+Theorem relative_error_N :
+ forall x,
+ (bpow emin <= Rabs x)%R ->
+ (Rabs (round beta fexp (Znearest choice) x - x) <= /2 * bpow (-p + 1) * Rabs x)%R.
+Proof.
+intros x Hx.
+apply Rle_trans with (/2 * ulp beta fexp x)%R.
+now apply ulp_half_error.
+rewrite Rmult_assoc.
+apply Rmult_le_compat_l.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+assert (Hx': (x <> 0)%R).
+intros H.
+apply Rlt_not_le with (2 := Hx).
+rewrite H, Rabs_R0.
+apply bpow_gt_0.
+unfold ulp, canonic_exp.
+destruct (ln_beta beta x) as (ex, He).
+simpl.
+specialize (He Hx').
+apply Rle_trans with (bpow (-p + 1) * bpow (ex - 1))%R.
+rewrite <- bpow_plus.
+apply bpow_le.
+assert (emin < ex)%Z.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2 := proj2 He).
+exact Hx.
+generalize (Hmin ex).
+omega.
+apply Rmult_le_compat_l.
+apply bpow_ge_0.
+apply He.
+Qed.
+
+(** 1+#&epsilon;# property in rounding to nearest *)
+Theorem relative_error_N_ex :
+ forall x,
+ (bpow emin <= Rabs x)%R ->
+ exists eps,
+ (Rabs eps <= /2 * bpow (-p + 1))%R /\ round beta fexp (Znearest choice) x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_le_conversion...
+apply Rlt_le.
+apply Rmult_lt_0_compat.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply bpow_gt_0.
+now apply relative_error_N.
+Qed.
+
+Theorem relative_error_N_F2R_emin :
+ forall m, let x := F2R (Float beta m emin) in
+ (Rabs (round beta fexp (Znearest choice) x - x) <= /2 * bpow (-p + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros m x.
+destruct (Req_dec x 0) as [Hx|Hx].
+(* . *)
+rewrite Hx, round_0...
+unfold Rminus.
+rewrite Rplus_0_l, Rabs_Ropp, Rabs_R0.
+rewrite Rmult_0_r.
+apply Rle_refl.
+(* . *)
+apply relative_error_N.
+unfold x.
+rewrite <- F2R_Zabs.
+apply bpow_le_F2R.
+apply F2R_lt_reg with beta emin.
+rewrite F2R_0, F2R_Zabs.
+now apply Rabs_pos_lt.
+Qed.
+
+Theorem relative_error_N_F2R_emin_ex :
+ forall m, let x := F2R (Float beta m emin) in
+ exists eps,
+ (Rabs eps <= /2 * bpow (-p + 1))%R /\ round beta fexp (Znearest choice) x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros m x.
+apply relative_error_le_conversion...
+apply Rlt_le.
+apply Rmult_lt_0_compat.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply bpow_gt_0.
+now apply relative_error_N_F2R_emin.
+Qed.
+
+Theorem relative_error_N_round :
+ (0 < p)%Z ->
+ forall x,
+ (bpow emin <= Rabs x)%R ->
+ (Rabs (round beta fexp (Znearest choice) x - x) <= /2 * bpow (-p + 1) * Rabs (round beta fexp (Znearest choice) x))%R.
+Proof with auto with typeclass_instances.
+intros Hp x Hx.
+apply Rle_trans with (/2 * ulp beta fexp x)%R.
+now apply ulp_half_error.
+rewrite Rmult_assoc.
+apply Rmult_le_compat_l.
+apply Rlt_le.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+assert (Hx': (x <> 0)%R).
+intros H.
+apply Rlt_not_le with (2 := Hx).
+rewrite H, Rabs_R0.
+apply bpow_gt_0.
+unfold ulp, canonic_exp.
+destruct (ln_beta beta x) as (ex, He).
+simpl.
+specialize (He Hx').
+assert (He': (emin < ex)%Z).
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2 := proj2 He).
+exact Hx.
+apply Rle_trans with (bpow (-p + 1) * bpow (ex - 1))%R.
+rewrite <- bpow_plus.
+apply bpow_le.
+generalize (Hmin ex).
+omega.
+apply Rmult_le_compat_l.
+apply bpow_ge_0.
+generalize He.
+apply round_abs_abs...
+clear rnd valid_rnd x Hx Hx' He.
+intros rnd valid_rnd x Hx.
+rewrite <- (round_generic beta fexp rnd (bpow (ex - 1))).
+now apply round_le.
+apply generic_format_bpow.
+ring_simplify (ex - 1 + 1)%Z.
+generalize (Hmin ex).
+omega.
+Qed.
+
+Theorem relative_error_N_round_F2R_emin :
+ (0 < p)%Z ->
+ forall m, let x := F2R (Float beta m emin) in
+ (Rabs (round beta fexp (Znearest choice) x - x) <= /2 * bpow (-p + 1) * Rabs (round beta fexp (Znearest choice) x))%R.
+Proof with auto with typeclass_instances.
+intros Hp m x.
+destruct (Req_dec x 0) as [Hx|Hx].
+(* . *)
+rewrite Hx, round_0...
+unfold Rminus.
+rewrite Rplus_0_l, Rabs_Ropp, Rabs_R0.
+rewrite Rmult_0_r.
+apply Rle_refl.
+(* . *)
+apply relative_error_N_round with (1 := Hp).
+unfold x.
+rewrite <- F2R_Zabs.
+apply bpow_le_F2R.
+apply F2R_lt_reg with beta emin.
+rewrite F2R_0, F2R_Zabs.
+now apply Rabs_pos_lt.
+Qed.
+
+End Fprop_relative_generic.
+
+Section Fprop_relative_FLT.
+
+Variable emin prec : Z.
+Variable Hp : Zlt 0 prec.
+
+Lemma relative_error_FLT_aux :
+ forall k, (emin + prec - 1 < k)%Z -> (prec <= k - FLT_exp emin prec k)%Z.
+Proof.
+intros k Hk.
+unfold FLT_exp.
+generalize (Zmax_spec (k - prec) emin).
+omega.
+Qed.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem relative_error_FLT_F2R_emin :
+ forall m, let x := F2R (Float beta m (emin + prec - 1)) in
+ (x <> 0)%R ->
+ (Rabs (round beta (FLT_exp emin prec) rnd x - x) < bpow (-prec + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros m x Hx.
+apply relative_error_F2R_emin...
+apply relative_error_FLT_aux.
+Qed.
+
+Theorem relative_error_FLT :
+ forall x,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ (Rabs (round beta (FLT_exp emin prec) rnd x - x) < bpow (-prec + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error with (emin + prec - 1)%Z...
+apply relative_error_FLT_aux.
+Qed.
+
+Theorem relative_error_FLT_F2R_emin_ex :
+ forall m, let x := F2R (Float beta m (emin + prec - 1)) in
+ (x <> 0)%R ->
+ exists eps,
+ (Rabs eps < bpow (-prec + 1))%R /\ round beta (FLT_exp emin prec) rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros m x Hx.
+apply relative_error_lt_conversion...
+apply bpow_gt_0.
+now apply relative_error_FLT_F2R_emin.
+Qed.
+
+(** 1+#&epsilon;# property in any rounding in FLT *)
+Theorem relative_error_FLT_ex :
+ forall x,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ exists eps,
+ (Rabs eps < bpow (-prec + 1))%R /\ round beta (FLT_exp emin prec) rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_lt_conversion...
+apply bpow_gt_0.
+now apply relative_error_FLT.
+Qed.
+
+Variable choice : Z -> bool.
+
+Theorem relative_error_N_FLT :
+ forall x,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ (Rabs (round beta (FLT_exp emin prec) (Znearest choice) x - x) <= /2 * bpow (-prec + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_N with (emin + prec - 1)%Z...
+apply relative_error_FLT_aux.
+Qed.
+
+(** 1+#&epsilon;# property in rounding to nearest in FLT *)
+Theorem relative_error_N_FLT_ex :
+ forall x,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ exists eps,
+ (Rabs eps <= /2 * bpow (-prec + 1))%R /\ round beta (FLT_exp emin prec) (Znearest choice) x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_le_conversion...
+apply Rlt_le.
+apply Rmult_lt_0_compat.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply bpow_gt_0.
+now apply relative_error_N_FLT.
+Qed.
+
+Theorem relative_error_N_FLT_round :
+ forall x,
+ (bpow (emin + prec - 1) <= Rabs x)%R ->
+ (Rabs (round beta (FLT_exp emin prec) (Znearest choice) x - x) <= /2 * bpow (-prec + 1) * Rabs (round beta (FLT_exp emin prec) (Znearest choice) x))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_N_round with (emin + prec - 1)%Z...
+apply relative_error_FLT_aux.
+Qed.
+
+Theorem relative_error_N_FLT_F2R_emin :
+ forall m, let x := F2R (Float beta m (emin + prec - 1)) in
+ (Rabs (round beta (FLT_exp emin prec) (Znearest choice) x - x) <= /2 * bpow (-prec + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros m x.
+apply relative_error_N_F2R_emin...
+apply relative_error_FLT_aux.
+Qed.
+
+Theorem relative_error_N_FLT_F2R_emin_ex :
+ forall m, let x := F2R (Float beta m (emin + prec - 1)) in
+ exists eps,
+ (Rabs eps <= /2 * bpow (-prec + 1))%R /\ round beta (FLT_exp emin prec) (Znearest choice) x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros m x.
+apply relative_error_le_conversion...
+apply Rlt_le.
+apply Rmult_lt_0_compat.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply bpow_gt_0.
+now apply relative_error_N_FLT_F2R_emin.
+Qed.
+
+Theorem relative_error_N_FLT_round_F2R_emin :
+ forall m, let x := F2R (Float beta m (emin + prec - 1)) in
+ (Rabs (round beta (FLT_exp emin prec) (Znearest choice) x - x) <= /2 * bpow (-prec + 1) * Rabs (round beta (FLT_exp emin prec) (Znearest choice) x))%R.
+Proof with auto with typeclass_instances.
+intros m x.
+apply relative_error_N_round_F2R_emin...
+apply relative_error_FLT_aux.
+Qed.
+
+Lemma error_N_FLT_aux :
+ forall x,
+ (0 < x)%R ->
+ exists eps, exists eta,
+ (Rabs eps <= /2 * bpow (-prec + 1))%R /\
+ (Rabs eta <= /2 * bpow (emin))%R /\
+ (eps*eta=0)%R /\
+ round beta (FLT_exp emin prec) (Znearest choice) x = (x * (1 + eps) + eta)%R.
+Proof.
+intros x Hx2.
+case (Rle_or_lt (bpow (emin+prec)) x); intros Hx.
+(* *)
+destruct relative_error_N_ex with (FLT_exp emin prec) (emin+prec)%Z prec choice x
+ as (eps,(Heps1,Heps2)).
+now apply FLT_exp_valid.
+intros; unfold FLT_exp.
+rewrite Zmax_left; omega.
+rewrite Rabs_right;[assumption|apply Rle_ge; now left].
+exists eps; exists 0%R.
+split;[assumption|split].
+rewrite Rabs_R0; apply Rmult_le_pos.
+auto with real.
+apply bpow_ge_0.
+split;[apply Rmult_0_r|idtac].
+now rewrite Rplus_0_r.
+(* *)
+exists 0%R; exists (round beta (FLT_exp emin prec) (Znearest choice) x - x)%R.
+split.
+rewrite Rabs_R0; apply Rmult_le_pos.
+auto with real.
+apply bpow_ge_0.
+split.
+apply Rle_trans with (/2*ulp beta (FLT_exp emin prec) x)%R.
+apply ulp_half_error.
+now apply FLT_exp_valid.
+apply Rmult_le_compat_l; auto with real.
+unfold ulp.
+apply bpow_le.
+unfold FLT_exp, canonic_exp.
+rewrite Zmax_right.
+omega.
+destruct (ln_beta beta x) as (e,He); simpl.
+assert (e-1 < emin+prec)%Z.
+apply (lt_bpow beta).
+apply Rle_lt_trans with (2:=Hx).
+rewrite <- (Rabs_right x).
+apply He; auto with real.
+apply Rle_ge; now left.
+omega.
+split;ring.
+Qed.
+
+End Fprop_relative_FLT.
+
+Section Fprop_relative_FLX.
+
+Variable prec : Z.
+Variable Hp : Zlt 0 prec.
+
+Lemma relative_error_FLX_aux :
+ forall k, (prec <= k - FLX_exp prec k)%Z.
+Proof.
+intros k.
+unfold FLX_exp.
+omega.
+Qed.
+
+Variable rnd : R -> Z.
+Context { valid_rnd : Valid_rnd rnd }.
+
+Theorem relative_error_FLX :
+ forall x,
+ (x <> 0)%R ->
+ (Rabs (round beta (FLX_exp prec) rnd x - x) < bpow (-prec + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+destruct (ln_beta beta x) as (ex, He).
+specialize (He Hx).
+apply relative_error with (ex - 1)%Z...
+intros k _.
+apply relative_error_FLX_aux.
+apply He.
+Qed.
+
+(** 1+#&epsilon;# property in any rounding in FLX *)
+Theorem relative_error_FLX_ex :
+ forall x,
+ (x <> 0)%R ->
+ exists eps,
+ (Rabs eps < bpow (-prec + 1))%R /\ round beta (FLX_exp prec) rnd x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+apply relative_error_lt_conversion...
+apply bpow_gt_0.
+now apply relative_error_FLX.
+Qed.
+
+Theorem relative_error_FLX_round :
+ forall x,
+ (x <> 0)%R ->
+ (Rabs (round beta (FLX_exp prec) rnd x - x) < bpow (-prec + 1) * Rabs (round beta (FLX_exp prec) rnd x))%R.
+Proof with auto with typeclass_instances.
+intros x Hx.
+destruct (ln_beta beta x) as (ex, He).
+specialize (He Hx).
+apply relative_error_round with (ex - 1)%Z...
+intros k _.
+apply relative_error_FLX_aux.
+apply He.
+Qed.
+
+Variable choice : Z -> bool.
+
+Theorem relative_error_N_FLX :
+ forall x,
+ (Rabs (round beta (FLX_exp prec) (Znearest choice) x - x) <= /2 * bpow (-prec + 1) * Rabs x)%R.
+Proof with auto with typeclass_instances.
+intros x.
+destruct (Req_dec x 0) as [Hx|Hx].
+(* . *)
+rewrite Hx, round_0...
+unfold Rminus.
+rewrite Rplus_0_l, Rabs_Ropp, Rabs_R0.
+rewrite Rmult_0_r.
+apply Rle_refl.
+(* . *)
+destruct (ln_beta beta x) as (ex, He).
+specialize (He Hx).
+apply relative_error_N with (ex - 1)%Z...
+intros k _.
+apply relative_error_FLX_aux.
+apply He.
+Qed.
+
+(** 1+#&epsilon;# property in rounding to nearest in FLX *)
+Theorem relative_error_N_FLX_ex :
+ forall x,
+ exists eps,
+ (Rabs eps <= /2 * bpow (-prec + 1))%R /\ round beta (FLX_exp prec) (Znearest choice) x = (x * (1 + eps))%R.
+Proof with auto with typeclass_instances.
+intros x.
+apply relative_error_le_conversion...
+apply Rlt_le.
+apply Rmult_lt_0_compat.
+apply Rinv_0_lt_compat.
+now apply (Z2R_lt 0 2).
+apply bpow_gt_0.
+now apply relative_error_N_FLX.
+Qed.
+
+Theorem relative_error_N_FLX_round :
+ forall x,
+ (Rabs (round beta (FLX_exp prec) (Znearest choice) x - x) <= /2 * bpow (-prec + 1) * Rabs (round beta (FLX_exp prec) (Znearest choice) x))%R.
+Proof with auto with typeclass_instances.
+intros x.
+destruct (Req_dec x 0) as [Hx|Hx].
+(* . *)
+rewrite Hx, round_0...
+unfold Rminus.
+rewrite Rplus_0_l, Rabs_Ropp, Rabs_R0.
+rewrite Rmult_0_r.
+apply Rle_refl.
+(* . *)
+destruct (ln_beta beta x) as (ex, He).
+specialize (He Hx).
+apply relative_error_N_round with (ex - 1)%Z.
+now apply FLX_exp_valid.
+intros k _.
+apply relative_error_FLX_aux.
+exact Hp.
+apply He.
+Qed.
+
+End Fprop_relative_FLX.
+
+End Fprop_relative. \ No newline at end of file
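
The bounds above specialize as follows to the binary64 format used by lib/Floats.v: with prec = 53, the round-to-nearest bound /2 * bpow (-prec + 1) is the unit roundoff 2^-53. A minimal OCaml sketch, assuming the host's native floats are IEEE binary64 (illustrative only, not part of the patch):

(* Unit roundoff for binary64: /2 * 2^(-53+1) = 2^-53. *)
let u = ldexp 1.0 (-53)              (* 2^-53, the relative_error_N_* bound *)
let () =
  assert (1.0 +. u = 1.0);           (* 1 + 2^-53 is a tie; ties-to-even rounds back to 1.0 *)
  assert (1.0 +. (2.0 *. u) > 1.0);  (* 2^-52 does change 1.0 *)
  assert (epsilon_float = 2.0 *. u)  (* OCaml's machine epsilon is 2^-52 *)

For the subnormal range, error_N_FLT_aux instead provides an absolute term eta bounded by /2 * bpow emin, which is 2^-1075 for the FLT_exp (3 - 1024 - 53) 53 instance used in lib/Floats.v.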
diff --git a/ia32/PrintAsm.ml b/ia32/PrintAsm.ml
index 4e7c916..ccc3c0f 100644
--- a/ia32/PrintAsm.ml
+++ b/ia32/PrintAsm.ml
@@ -519,9 +519,9 @@ let print_instruction oc = function
| Pmovsd_ff(rd, r1) ->
fprintf oc " movapd %a, %a\n" freg r1 freg rd
| Pmovsd_fi(rd, n) ->
- let b = Int64.bits_of_float n in
+ let b = camlint64_of_coqint (Floats.Float.bits_of_double n) in
let lbl = new_label() in
- fprintf oc " movsd %a, %a %s %.18g\n" label lbl freg rd comment n;
+ fprintf oc " movsd %a, %a %s %.18g\n" label lbl freg rd comment (camlfloat_of_coqfloat n);
float_literals := (lbl, b) :: !float_literals
| Pmovsd_fm(rd, a) ->
fprintf oc " movsd %a, %a\n" addressing a freg rd
@@ -772,9 +772,13 @@ let print_init oc = function
| Init_int32 n ->
fprintf oc " .long %ld\n" (camlint_of_coqint n)
| Init_float32 n ->
- fprintf oc " .long %ld %s %.18g\n" (Int32.bits_of_float n) comment n
+ fprintf oc " .long %ld %s %.18g\n"
+ (camlint_of_coqint (Floats.Float.bits_of_single n))
+ comment (camlfloat_of_coqfloat n)
| Init_float64 n ->
- fprintf oc " .quad %Ld %s %.18g\n" (Int64.bits_of_float n) comment n
+ fprintf oc " .quad %Ld %s %.18g\n"
+ (camlint64_of_coqint (Floats.Float.bits_of_double n))
+ comment (camlfloat_of_coqfloat n)
| Init_space n ->
let n = camlint_of_z n in
if n > 0l then fprintf oc " .space %ld\n" n
diff --git a/ia32/PrintOp.ml b/ia32/PrintOp.ml
index 6e6ef3c..f76416a 100644
--- a/ia32/PrintOp.ml
+++ b/ia32/PrintOp.ml
@@ -63,7 +63,7 @@ let print_addressing reg pp = function
let print_operation reg pp = function
| Omove, [r1] -> reg pp r1
| Ointconst n, [] -> fprintf pp "%ld" (camlint_of_coqint n)
- | Ofloatconst n, [] -> fprintf pp "%F" n
+ | Ofloatconst n, [] -> fprintf pp "%F" (camlfloat_of_coqfloat n)
| Ocast8signed, [r1] -> fprintf pp "int8signed(%a)" reg r1
| Ocast8unsigned, [r1] -> fprintf pp "int8unsigned(%a)" reg r1
| Ocast16signed, [r1] -> fprintf pp "int16signed(%a)" reg r1
diff --git a/lib/Camlcoq.ml b/lib/Camlcoq.ml
index cfbca6e..57b1b56 100644
--- a/lib/Camlcoq.ml
+++ b/lib/Camlcoq.ml
@@ -18,6 +18,7 @@
open Datatypes
open BinPos
open BinInt
+open Floats
(* Integers *)
@@ -116,6 +117,13 @@ let camlstring_of_coqstring (s: char list) =
| c :: s -> r.[pos] <- c; fill (pos + 1) s
in fill 0 s
+(* Floats *)
+
+let coqfloat_of_camlfloat f =
+ Float.double_of_bits(coqint_of_camlint64(Int64.bits_of_float f))
+let camlfloat_of_coqfloat f =
+ Int64.float_of_bits(camlint64_of_coqint(Float.bits_of_double f))
+
(* Timing facility *)
(*
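
The two helpers added to lib/Camlcoq.ml transport floats between the extracted Flocq representation and native OCaml floats purely through their 64-bit patterns. A minimal sketch, assuming IEEE binary64 native floats, of the round-trip identity they rely on:

(* Going through the 64-bit pattern and back is the identity on native floats. *)
let roundtrip f = Int64.float_of_bits (Int64.bits_of_float f)
let () =
  assert (Int64.bits_of_float (roundtrip 0.1) = Int64.bits_of_float 0.1);
  assert (roundtrip neg_infinity = neg_infinity)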
diff --git a/lib/Floataux.ml b/lib/Floataux.ml
deleted file mode 100644
index dfdd6ce..0000000
--- a/lib/Floataux.ml
+++ /dev/null
@@ -1,51 +0,0 @@
-(* *********************************************************************)
-(* *)
-(* The Compcert verified compiler *)
-(* *)
-(* Xavier Leroy, INRIA Paris-Rocquencourt *)
-(* *)
-(* Copyright Institut National de Recherche en Informatique et en *)
-(* Automatique. All rights reserved. This file is distributed *)
-(* under the terms of the GNU General Public License as published by *)
-(* the Free Software Foundation, either version 2 of the License, or *)
-(* (at your option) any later version. This file is also distributed *)
-(* under the terms of the INRIA Non-Commercial License Agreement. *)
-(* *)
-(* *********************************************************************)
-
-open Camlcoq
-
-let singleoffloat f =
- Int32.float_of_bits (Int32.bits_of_float f)
-
-let intoffloat f =
- if f < 2147483648.0 (*2^31 *) && f > -2147483649.0 (* -2^31-1 *)
- then Some (coqint_of_camlint (Int32.of_float f))
- else None
-
-let intuoffloat f =
- if f < 4294967296.0 (* 2^32 *) && f >= 0.0
- then Some (coqint_of_camlint (Int64.to_int32 (Int64.of_float f)))
- else None
-
-let floatofint i =
- Int32.to_float (camlint_of_coqint i)
-
-let floatofintu i =
- Int64.to_float (Int64.logand (Int64.of_int32 (camlint_of_coqint i))
- 0xFFFFFFFFL)
-
-let cmp c (x: float) (y: float) =
- match c with
- | Integers.Ceq -> x = y
- | Integers.Cne -> x <> y
- | Integers.Clt -> x < y
- | Integers.Cle -> x <= y
- | Integers.Cgt -> x > y
- | Integers.Cge -> x >= y
-
-let bits_of_single f = coqint_of_camlint (Int32.bits_of_float f)
-let single_of_bits f = Int32.float_of_bits (camlint_of_coqint f)
-
-let bits_of_double f = coqint_of_camlint64 (Int64.bits_of_float f)
-let double_of_bits f = Int64.float_of_bits (camlint64_of_coqint f)
diff --git a/lib/Floats.v b/lib/Floats.v
index f6af7bf..e7a7aa0 100644
--- a/lib/Floats.v
+++ b/lib/Floats.v
@@ -3,6 +3,7 @@
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
+(* Jacques-Henri Jourdan, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
@@ -13,49 +14,175 @@
(* *)
(* *********************************************************************)
-(** Axiomatization of floating-point numbers. *)
-
-(** In contrast with what we do with machine integers, we do not bother
- to formalize precisely IEEE floating-point arithmetic. Instead, we
- simply axiomatize a type [float] for IEEE double-precision floats
- and the associated operations. *)
+(** Formalization of floating-point numbers, using the Flocq library. *)
+Require Import Axioms.
Require Import Coqlib.
Require Import Integers.
+Require Import Reals.
+Require Import Fappli_IEEE.
+Require Import Fappli_IEEE_bits.
+Require Import Fcore.
+Require Import Fcalc_round.
+Require Import Fcalc_bracket.
+Require Import Fprop_Sterbenz.
+Require Import Program.
+Require Import Omega.
+
+Close Scope R_scope.
-Parameter float: Type. (**r the type of IEE754 doubles *)
+Definition float := binary64. (**r the type of IEEE 754 doubles *)
Module Float.
-Parameter zero: float. (**r the float [+0.0] *)
+Definition zero: float := B754_zero _ _ false. (**r the float [+0.0] *)
-Axiom eq_dec: forall (f1 f2: float), {f1 = f2} + {f1 <> f2}.
+Definition eq_dec: forall (f1 f2: float), {f1 = f2} + {f1 <> f2}.
+Proof.
+ Ltac try_not_eq := try solve [right; congruence].
+ destruct f1, f2;
+ try destruct b; try destruct b0;
+ try solve [left; auto]; try_not_eq;
+ destruct (positive_eq_dec m m0); try_not_eq;
+ destruct (Z_eq_dec e e1); try solve [right; intro H; inv H; congruence];
+ subst; left; rewrite (proof_irr e0 e2); auto.
+Defined.
(** Arithmetic operations *)
-Parameter neg: float -> float. (**r opposite (change sign) *)
-Parameter abs: float -> float. (**r absolute value (set sign to [+]) *)
-Parameter singleoffloat: float -> float. (**r conversion to single precision *)
-Parameter intoffloat: float -> option int. (**r conversion to signed 32-bit int *)
-Parameter intuoffloat: float -> option int. (**r conversion to unsigned 32-bit int *)
-Parameter floatofint: int -> float. (**r conversion from signed 32-bit int *)
-Parameter floatofintu: int -> float. (**r conversion from unsigned 32-bit int *)
-
-Parameter add: float -> float -> float. (**r addition *)
-Parameter sub: float -> float -> float. (**r subtraction *)
-Parameter mul: float -> float -> float. (**r multiplication *)
-Parameter div: float -> float -> float. (**r division *)
-
-Parameter cmp: comparison -> float -> float -> bool. (**r comparison *)
+Definition neg: float -> float := b64_opp. (**r opposite (change sign) *)
+Definition abs (x: float): float := (**r absolute value (set sign to [+]) *)
+ match x with
+ | B754_nan => x
+ | B754_infinity _ => B754_infinity _ _ false
+ | B754_finite _ m e H => B754_finite _ _ false m e H
+ | B754_zero _ => B754_zero _ _ false
+ end.
+
+Definition binary_normalize64 (m e:Z) (s:bool): float :=
+ binary_normalize 53 1024 eq_refl eq_refl mode_NE m e s.
+
+Definition binary_normalize64_correct (m e:Z) (s:bool) :=
+ binary_normalize_correct 53 1024 eq_refl eq_refl mode_NE m e s.
+Global Opaque binary_normalize64_correct.
+
+Definition floatofbinary32 (f: binary32) : float := (**r single precision embedding in double precision *)
+ match f with
+ | B754_nan => B754_nan _ _
+ | B754_infinity s => B754_infinity _ _ s
+ | B754_zero s => B754_zero _ _ s
+ | B754_finite s m e _ =>
+ binary_normalize64 (cond_Zopp s (Zpos m)) e s
+ end.
+
+Definition binary32offloat (f: float) : binary32 := (**r conversion to single precision *)
+ match f with
+ | B754_nan => B754_nan _ _
+ | B754_infinity s => B754_infinity _ _ s
+ | B754_zero s => B754_zero _ _ s
+ | B754_finite s m e _ =>
+ binary_normalize 24 128 eq_refl eq_refl mode_NE (cond_Zopp s (Zpos m)) e s
+ end.
+
+Definition singleoffloat (f: float): float := (**r conversion to single precision, embedded in double *)
+ floatofbinary32 (binary32offloat f).
+
+
+Definition Zoffloat (f:float): option Z := (**r conversion to Z *)
+ match f with
+ | B754_finite s m (Zpos e) _ => Some (cond_Zopp s (Zpos m) * Zpower_pos radix2 e)
+ | B754_finite s m 0 _ => Some (cond_Zopp s (Zpos m))
+ | B754_finite s m (Zneg e) _ => Some (cond_Zopp s (Zpos m / Zpower_pos radix2 e))
+ | B754_zero _ => Some 0
+ | _ => None
+ end.
+
+Definition intoffloat (f:float): option int := (**r conversion to signed 32-bit int *)
+ match Zoffloat f with
+ | Some n =>
+ if Zle_bool Int.min_signed n && Zle_bool n Int.max_signed then
+ Some (Int.repr n)
+ else
+ None
+ | None => None
+ end.
+
+Definition intuoffloat (f:float): option int := (**r conversion to unsigned 32-bit int *)
+ match Zoffloat f with
+ | Some n =>
+ if Zle_bool 0 n && Zle_bool n Int.max_unsigned then
+ Some (Int.repr n)
+ else
+ None
+ | None => None
+ end.
+
+Definition floatofint (n:int): float := (**r conversion from signed 32-bit int *)
+ binary_normalize64 (Int.signed n) 0 false.
+Definition floatofintu (n:int): float:= (**r conversion from unsigned 32-bit int *)
+ binary_normalize64 (Int.unsigned n) 0 false.
+Definition add: float -> float -> float := b64_plus mode_NE. (**r addition *)
+Definition sub: float -> float -> float := b64_minus mode_NE. (**r subtraction *)
+Definition mul: float -> float -> float := b64_mult mode_NE. (**r multiplication *)
+Definition div: float -> float -> float := b64_div mode_NE. (**r division *)
+
+Definition order_float (f1 f2:float): option Datatypes.comparison :=
+ match f1, f2 with
+ | B754_nan,_ | _,B754_nan => None
+ | B754_infinity true, B754_infinity true
+ | B754_infinity false, B754_infinity false => Some Eq
+ | B754_infinity true, _ => Some Lt
+ | B754_infinity false, _ => Some Gt
+ | _, B754_infinity true => Some Gt
+ | _, B754_infinity false => Some Lt
+ | B754_finite true _ _ _, B754_zero _ => Some Lt
+ | B754_finite false _ _ _, B754_zero _ => Some Gt
+ | B754_zero _, B754_finite true _ _ _ => Some Gt
+ | B754_zero _, B754_finite false _ _ _ => Some Lt
+ | B754_zero _, B754_zero _ => Some Eq
+ | B754_finite s1 m1 e1 _, B754_finite s2 m2 e2 _ =>
+ match s1, s2 with
+ | true, false => Some Lt
+ | false, true => Some Gt
+ | false, false =>
+ match Zcompare e1 e2 with
+ | Lt => Some Lt
+ | Gt => Some Gt
+ | Eq => Some (Pcompare m1 m2 Eq)
+ end
+ | true, true =>
+ match Zcompare e1 e2 with
+ | Lt => Some Gt
+ | Gt => Some Lt
+ | Eq => Some (CompOpp (Pcompare m1 m2 Eq))
+ end
+ end
+ end.
+
+Definition cmp (c:comparison) (f1 f2:float) : bool := (**r comparison *)
+ match c with
+ | Ceq =>
+ match order_float f1 f2 with Some Eq => true | _ => false end
+ | Cne =>
+ match order_float f1 f2 with Some Eq => false | _ => true end
+ | Clt =>
+ match order_float f1 f2 with Some Lt => true | _ => false end
+ | Cle =>
+ match order_float f1 f2 with Some(Lt|Eq) => true | _ => false end
+ | Cgt =>
+ match order_float f1 f2 with Some Gt => true | _ => false end
+ | Cge =>
+ match order_float f1 f2 with Some(Gt|Eq) => true | _ => false end
+ end.
(** Conversions between floats and their concrete in-memory representation
as a sequence of 64 bits (double precision) or 32 bits (single precision). *)
-Parameter bits_of_double: float -> int64.
-Parameter double_of_bits: int64 -> float.
+Definition bits_of_double (f: float): int64 := Int64.repr (bits_of_b64 f).
+Definition double_of_bits (b: int64): float := b64_of_bits (Int64.unsigned b).
-Parameter bits_of_single: float -> int.
-Parameter single_of_bits: int -> float.
+Definition bits_of_single (f: float) : int := Int.repr (bits_of_b32 (binary32offloat f)).
+Definition single_of_bits (b: int): float := floatofbinary32 (b32_of_bits (Int.unsigned b)).
Definition from_words (hi lo: int) : float :=
double_of_bits
@@ -65,40 +192,164 @@ Definition from_words (hi lo: int) : float :=
(** Below are the only properties of floating-point arithmetic that we
rely on in the compiler proof. *)
-Axiom addf_commut: forall f1 f2, add f1 f2 = add f2 f1.
+(** Some tactics **)
+
+Ltac compute_this val :=
+ let x := fresh in set val as x in *; vm_compute in x; subst x.
+
+Ltac smart_omega :=
+ simpl radix_val in *; simpl Zpower in *;
+ compute_this Int.modulus; compute_this Int.half_modulus;
+ compute_this Int.max_unsigned;
+ compute_this Int.min_signed; compute_this Int.max_signed;
+ compute_this Int64.modulus; compute_this Int64.half_modulus;
+ compute_this Int64.max_unsigned;
+ compute_this (Zpower_pos 2 1024); compute_this (Zpower_pos 2 53); compute_this (Zpower_pos 2 52);
+ omega.
+
+Theorem addf_commut: forall f1 f2, add f1 f2 = add f2 f1.
+Proof.
+ intros.
+ destruct f1, f2; simpl; try reflexivity; try (destruct b, b0; reflexivity).
+ rewrite Zplus_comm; rewrite Zmin_comm; reflexivity.
+Qed.
+
+Theorem subf_addf_opp: forall f1 f2, sub f1 f2 = add f1 (neg f2).
+Proof.
+ destruct f1, f2; reflexivity.
+Qed.
-Axiom subf_addf_opp: forall f1 f2, sub f1 f2 = add f1 (neg f2).
+Lemma floatofbinary32_exact :
+ forall f, is_finite_strict _ _ f = true ->
+ is_finite_strict _ _ (floatofbinary32 f) = true /\ B2R _ _ f = B2R _ _ (floatofbinary32 f).
+Proof.
+ destruct f as [ | | |s m e]; try discriminate; intro.
+ pose proof (binary_normalize64_correct (cond_Zopp s (Zpos m)) e s).
+ match goal with [H0:if Rlt_bool (Rabs ?x) _ then _ else _ |- _ /\ ?y = _] => assert (x=y)%R end.
+ apply round_generic; [now apply valid_rnd_round_mode|].
+ apply (generic_inclusion_ln_beta _ (FLT_exp (3 - 128 - 24) 24)).
+ intro; eapply Zle_trans; [apply Zle_max_compat_l | apply Zle_max_compat_r]; omega.
+ apply generic_format_canonic; apply canonic_canonic_mantissa; apply (proj1 (andb_prop _ _ e0)).
+ rewrite H1, Rlt_bool_true in H0; intuition; unfold floatofbinary32, binary_normalize64.
+ match goal with [ |- _ _ _ ?x = true ] => destruct x end; try discriminate.
+ symmetry in H2; apply F2R_eq_0_reg in H2; destruct s; discriminate.
+ reflexivity.
+ eapply Rlt_trans.
+ unfold B2R; rewrite <- F2R_Zabs, abs_cond_Zopp; eapply bounded_lt_emax; now apply e0.
+ now apply bpow_lt.
+Qed.
-Axiom singleoffloat_idem:
+Lemma binary32offloatofbinary32 :
+ forall f, binary32offloat (floatofbinary32 f) = f.
+Proof.
+ intro; pose proof (floatofbinary32_exact f); destruct f as [ | | |s m e]; try reflexivity.
+ specialize (H eq_refl); destruct H.
+ destruct (floatofbinary32 (B754_finite 24 128 s m e e0)) as [ | | |s1 m1 e1]; try discriminate.
+ unfold binary32offloat.
+ pose proof (binary_normalize_correct 24 128 eq_refl eq_refl mode_NE (cond_Zopp s1 (Zpos m1)) e1 s1).
+ unfold B2R at 2 in H0; cbv iota zeta beta in H0; rewrite <- H0, round_generic in H1.
+ rewrite Rlt_bool_true in H1.
+ apply B2R_inj; intuition; match goal with [|- _ _ _ ?f = true] => destruct f end; try discriminate.
+ symmetry in H2; apply F2R_eq_0_reg in H2; destruct s; discriminate.
+ reflexivity.
+ unfold B2R; rewrite <- F2R_Zabs, abs_cond_Zopp; eapply bounded_lt_emax; apply e0.
+ now apply valid_rnd_round_mode.
+ now apply generic_format_B2R.
+Qed.
+
+Theorem singleoffloat_idem:
forall f, singleoffloat (singleoffloat f) = singleoffloat f.
+Proof.
+ intros; unfold singleoffloat; rewrite binary32offloatofbinary32; reflexivity.
+Qed.
(** Properties of comparisons. *)
-Axiom cmp_swap:
+Theorem order_float_finite_correct:
+ forall f1 f2, is_finite _ _ f1 = true -> is_finite _ _ f2 = true ->
+ match order_float f1 f2 with
+ | Some c => Rcompare (B2R _ _ f1) (B2R _ _ f2) = c
+ | None => False
+ end.
+Proof.
+ Ltac apply_Rcompare :=
+ match goal with
+ | [ |- Rcompare _ _ = Lt ] => apply Rcompare_Lt
+ | [ |- Rcompare _ _ = Eq ] => apply Rcompare_Eq
+ | [ |- Rcompare _ _ = Gt ] => apply Rcompare_Gt
+ end.
+ unfold order_float; intros.
+ destruct f1, f2; try discriminate; unfold B2R, F2R, Fnum, Fexp, cond_Zopp;
+ try (replace 0%R with (Z2R 0 * bpow radix2 e)%R by (simpl Z2R; ring);
+ rewrite Rcompare_mult_r by (apply bpow_gt_0); rewrite Rcompare_Z2R).
+ apply_Rcompare; reflexivity.
+ destruct b0; reflexivity.
+ destruct b; reflexivity.
+ clear H H0.
+ apply andb_prop in e0; destruct e0; apply (canonic_canonic_mantissa _ _ false) in H.
+ apply andb_prop in e2; destruct e2; apply (canonic_canonic_mantissa _ _ false) in H1.
+ pose proof (Zcompare_spec e e1); unfold canonic, Fexp in H1, H.
+ assert (forall m1 m2 e1 e2,
+ let x := (Z2R (Zpos m1) * bpow radix2 e1)%R in
+ let y := (Z2R (Zpos m2) * bpow radix2 e2)%R in
+ canonic_exp radix2 (FLT_exp (3-1024-53) 53) x < canonic_exp radix2 (FLT_exp (3-1024-53) 53) y -> (x < y)%R).
+ intros; apply Rnot_le_lt; intro; apply (ln_beta_le radix2) in H5.
+ apply (fexp_monotone 53 1024) in H5; unfold canonic_exp in H4; omega.
+ apply Rmult_gt_0_compat; [apply (Z2R_lt 0); reflexivity|now apply bpow_gt_0].
+ assert (forall m1 m2 e1 e2, (Z2R (- Zpos m1) * bpow radix2 e1 < Z2R (Zpos m2) * bpow radix2 e2)%R).
+ intros; apply (Rlt_trans _ 0%R).
+ replace 0%R with (0*bpow radix2 e0)%R by ring; apply Rmult_lt_compat_r;
+ [apply bpow_gt_0; reflexivity|now apply (Z2R_lt _ 0)].
+ apply Rmult_gt_0_compat; [apply (Z2R_lt 0); reflexivity|now apply bpow_gt_0].
+ destruct b, b0; try (now apply_Rcompare; apply H5); inversion H3;
+ try (apply_Rcompare; apply H4; rewrite H, H1 in H7; assumption);
+ try (apply_Rcompare; do 2 rewrite Z2R_opp, Ropp_mult_distr_l_reverse;
+ apply Ropp_lt_contravar; apply H4; rewrite H, H1 in H7; assumption);
+ rewrite H7, Rcompare_mult_r, Rcompare_Z2R by (apply bpow_gt_0); reflexivity.
+Qed.
+
+Theorem cmp_swap:
forall c x y, Float.cmp (swap_comparison c) x y = Float.cmp c y x.
-Axiom cmp_ne_eq:
+Proof.
+ destruct c, x, y; simpl; try destruct b; try destruct b0; try reflexivity;
+ rewrite <- (Zcompare_antisym e e1); destruct (e ?= e1); try reflexivity;
+ change Eq with (CompOpp Eq); rewrite <- (Pcompare_antisym m m0 Eq);
+ simpl; destruct ((m ?= m0)%positive Eq); try reflexivity.
+Qed.
+
+Theorem cmp_ne_eq:
forall f1 f2, cmp Cne f1 f2 = negb (cmp Ceq f1 f2).
-Axiom cmp_lt_eq_false:
- forall x y, cmp Clt x y = true -> cmp Ceq x y = true -> False.
-Axiom cmp_le_lt_eq:
+Proof.
+ unfold cmp; intros; destruct (order_float f1 f2) as [ [] | ]; reflexivity.
+Qed.
+
+Theorem cmp_lt_eq_false:
+ forall f1 f2, cmp Clt f1 f2 = true -> cmp Ceq f1 f2 = true -> False.
+Proof.
+ unfold cmp; intros; destruct (order_float f1 f2) as [ [] | ]; discriminate.
+Qed.
+
+Theorem cmp_le_lt_eq:
forall f1 f2, cmp Cle f1 f2 = cmp Clt f1 f2 || cmp Ceq f1 f2.
+Proof.
+ unfold cmp; intros; destruct (order_float f1 f2) as [ [] | ]; reflexivity.
+Qed.
Corollary cmp_gt_eq_false:
forall x y, cmp Cgt x y = true -> cmp Ceq x y = true -> False.
Proof.
- intros. rewrite <- cmp_swap in H. rewrite <- cmp_swap in H0.
- eapply cmp_lt_eq_false; eauto.
+ intros; rewrite <- cmp_swap in H; rewrite <- cmp_swap in H0;
+ eapply cmp_lt_eq_false; now eauto.
Qed.
Corollary cmp_ge_gt_eq:
forall f1 f2, cmp Cge f1 f2 = cmp Cgt f1 f2 || cmp Ceq f1 f2.
Proof.
intros.
- change Cge with (swap_comparison Cle).
- change Cgt with (swap_comparison Clt).
- change Ceq with (swap_comparison Ceq).
- repeat rewrite cmp_swap.
- apply cmp_le_lt_eq.
+ change Cge with (swap_comparison Cle); change Cgt with (swap_comparison Clt);
+ change Ceq with (swap_comparison Ceq).
+ repeat rewrite cmp_swap.
+ now apply cmp_le_lt_eq.
Qed.
(** Properties of conversions to/from in-memory representation.
@@ -106,15 +357,62 @@ Qed.
The single-precision conversions lose precision exactly
as described by [singleoffloat] rounding. *)
-Axiom double_of_bits_of_double:
+Theorem double_of_bits_of_double:
forall f, double_of_bits (bits_of_double f) = f.
-Axiom single_of_bits_of_single:
+Proof.
+ intros; unfold double_of_bits, bits_of_double, bits_of_b64, b64_of_bits.
+ rewrite Int64.unsigned_repr, binary_float_of_bits_of_binary_float; [reflexivity|].
+ destruct f.
+ simpl; try destruct b; vm_compute; split; congruence.
+ simpl; try destruct b; vm_compute; split; congruence.
+ simpl; vm_compute; split; congruence.
+ unfold bits_of_binary_float, join_bits.
+ destruct (andb_prop _ _ e0); apply Zle_bool_imp_le in H0; apply Zeq_bool_eq in H; unfold FLT_exp in H.
+ match goal with [H:Zmax ?x ?y = e|-_] => pose proof (Zle_max_l x y); pose proof (Zle_max_r x y) end.
+ rewrite H, Fcalc_digits.Z_of_nat_S_digits2_Pnat in *.
+ lapply (Fcalc_digits.Zpower_gt_Zdigits radix2 53 (Zpos m)). intro.
+ unfold radix2, radix_val, Zabs in H3.
+ pose proof (Zle_bool_spec (2 ^ 52) (Zpos m)).
+ assert (Zpos m > 0); [vm_compute; exact eq_refl|].
+ compute_this (2^11); compute_this (2^(11-1)).
+ inversion H4; fold (2^52) in *; destruct H6; destruct b; now smart_omega.
+ change Fcalc_digits.radix2 with radix2 in H1; omega.
+Qed.
+
+Theorem single_of_bits_of_single:
forall f, single_of_bits (bits_of_single f) = singleoffloat f.
+Proof.
+ intros; unfold single_of_bits, bits_of_single, bits_of_b32, b32_of_bits.
+ rewrite Int.unsigned_repr, binary_float_of_bits_of_binary_float; [reflexivity|].
+ destruct (binary32offloat f).
+ simpl; try destruct b; vm_compute; split; congruence.
+ simpl; try destruct b; vm_compute; split; congruence.
+ simpl; vm_compute; split; congruence.
+ unfold bits_of_binary_float, join_bits.
+ destruct (andb_prop _ _ e0); apply Zle_bool_imp_le in H0; apply Zeq_bool_eq in H.
+ unfold FLT_exp in H.
+ match goal with [H:Zmax ?x ?y = e|-_] => pose proof (Zle_max_l x y); pose proof (Zle_max_r x y) end.
+ rewrite H, Fcalc_digits.Z_of_nat_S_digits2_Pnat in *.
+ lapply (Fcalc_digits.Zpower_gt_Zdigits radix2 24 (Zpos m)). intro.
+ unfold radix2, radix_val, Zabs in H3.
+ pose proof (Zle_bool_spec (2 ^ 23) (Zpos m)).
+ compute_this (2^23); compute_this (2^24); compute_this (2^8); compute_this (2^(8-1)).
+ assert (Zpos m > 0); [exact eq_refl|].
+ inversion H4; destruct b; now smart_omega.
+ change Fcalc_digits.radix2 with radix2 in H1; omega.
+Qed.
-Axiom bits_of_singleoffloat:
+Theorem bits_of_singleoffloat:
forall f, bits_of_single (singleoffloat f) = bits_of_single f.
-Axiom singleoffloat_of_bits:
+Proof.
+ intro; unfold singleoffloat, bits_of_single; rewrite binary32offloatofbinary32; reflexivity.
+Qed.
+
+Theorem singleoffloat_of_bits:
forall b, singleoffloat (single_of_bits b) = single_of_bits b.
+Proof.
+ intro; unfold singleoffloat, single_of_bits; rewrite binary32offloatofbinary32; reflexivity.
+Qed.
(** Conversions between floats and unsigned ints can be defined
in terms of conversions between floats and signed ints.
@@ -123,28 +421,271 @@ Axiom singleoffloat_of_bits:
Definition ox8000_0000 := Int.repr Int.half_modulus. (**r [0x8000_0000] *)
-Axiom floatofintu_floatofint_1:
+Lemma round_exact:
+ forall n, -2^52 < n < 2^52 ->
+ round radix2 (FLT_exp (3 - 1024 - 53) 53)
+ (round_mode mode_NE) (Z2R n) = Z2R n.
+Proof.
+ intros; rewrite round_generic; [reflexivity|now apply valid_rnd_round_mode|].
+ apply generic_format_FLT; exists (Float radix2 n 0).
+ unfold F2R, Fnum, Fexp, bpow; rewrite Rmult_1_r; intuition.
+ pose proof (Zabs_spec n); now smart_omega.
+Qed.
+
+Lemma binary_normalize64_exact:
+ forall n, -2^52 < n < 2^52 ->
+ B2R _ _ (binary_normalize64 n 0 false) = Z2R n /\
+ is_finite _ _ (binary_normalize64 n 0 false) = true.
+Proof.
+ intros; pose proof (binary_normalize64_correct n 0 false).
+ unfold F2R, Fnum, Fexp, bpow in H0; rewrite Rmult_1_r, round_exact, Rlt_bool_true in H0; try assumption.
+ rewrite <- Z2R_abs; apply Z2R_lt; pose proof (Zabs_spec n); now smart_omega.
+Qed.
+
+Theorem floatofintu_floatofint_1:
forall x,
Int.ltu x ox8000_0000 = true ->
floatofintu x = floatofint x.
+Proof.
+ unfold floatofintu, floatofint, Int.signed, Int.ltu; intro.
+ change (Int.unsigned ox8000_0000) with Int.half_modulus.
+ destruct (zlt (Int.unsigned x) Int.half_modulus); now intuition.
+Qed.
-Axiom floatofintu_floatofint_2:
+Theorem floatofintu_floatofint_2:
forall x,
Int.ltu x ox8000_0000 = false ->
floatofintu x = add (floatofint (Int.sub x ox8000_0000))
(floatofintu ox8000_0000).
+Proof.
+ unfold floatofintu, floatofint, Int.signed, Int.ltu, Int.sub; intros.
+ pose proof (Int.unsigned_range x).
+ compute_this (Int.unsigned ox8000_0000).
+ destruct (zlt (Int.unsigned x) 2147483648); try discriminate.
+ rewrite Int.unsigned_repr by smart_omega.
+ destruct (zlt ((Int.unsigned x) - 2147483648) Int.half_modulus).
+ unfold add, b64_plus.
+ match goal with [|- _ = Bplus _ _ _ _ _ ?x ?y] =>
+ pose proof (Bplus_correct 53 1024 eq_refl eq_refl mode_NE x y) end.
+ do 2 rewrite (fun x H => proj1 (binary_normalize64_exact x H)) in H1 by smart_omega.
+ do 2 rewrite (fun x H => proj2 (binary_normalize64_exact x H)) in H1 by smart_omega.
+ rewrite <- Z2R_plus, round_exact in H1 by smart_omega.
+ rewrite Rlt_bool_true in H1;
+ replace (Int.unsigned x - 2147483648 + 2147483648) with (Int.unsigned x) in * by ring.
+ apply B2R_inj.
+ destruct (binary_normalize64_exact (Int.unsigned x)); [now smart_omega|].
+ match goal with [|- _ _ _ ?f = _] => destruct f end; intuition.
+ exfalso; simpl in H2; change 0%R with (Z2R 0) in H2; apply eq_Z2R in H2; omega.
+ simpl Zcompare in *.
+ match goal with [|- _ _ _ ?f = _] => destruct f end; intuition.
+ exfalso; simpl in H0; change 0%R with (Z2R 0) in H0; apply eq_Z2R in H0; omega.
+ rewrite (fun x H => proj1 (binary_normalize64_exact x H)) by smart_omega; now intuition.
+ rewrite <- Z2R_Zpower, <- Z2R_abs by omega; apply Z2R_lt;
+ pose proof (Zabs_spec (Int.unsigned x)); now smart_omega.
+ exfalso; now smart_omega.
+Qed.
-Axiom intuoffloat_intoffloat_1:
+Theorem Zoffloat_correct:
+ forall f,
+ match Zoffloat f with
+ | Some n =>
+ is_finite _ _ f = true /\
+ Z2R n = round radix2 (FIX_exp 0) (round_mode mode_ZR) (B2R _ _ f)
+ | None =>
+ is_finite _ _ f = false
+ end.
+Proof.
+ destruct f; try now intuition.
+ simpl B2R. rewrite round_0. now intuition. now apply valid_rnd_round_mode.
+ destruct e. split. reflexivity.
+ rewrite round_generic. symmetry. now apply Rmult_1_r.
+ now apply valid_rnd_round_mode.
+ apply generic_format_FIX. exists (Float radix2 (cond_Zopp b (Zpos m)) 0). split; reflexivity.
+ split; [reflexivity|].
+ rewrite round_generic, Z2R_mult, Z2R_Zpower_pos, <- bpow_powerRZ;
+ [reflexivity|now apply valid_rnd_round_mode|apply generic_format_F2R; discriminate].
+ rewrite (inbetween_float_ZR_sign _ _ _ ((Zpos m) / Zpower_pos radix2 p)
+ (new_location (Zpower_pos radix2 p) (Zpos m mod Zpower_pos radix2 p) loc_Exact)).
+ unfold B2R, F2R, Fnum, Fexp, canonic_exp, bpow, FIX_exp, Zoffloat, radix2, radix_val.
+ pose proof (Rlt_bool_spec (Z2R (cond_Zopp b (Zpos m)) * / Z2R (Zpower_pos 2 p)) 0).
+ inversion H; rewrite <- (Rmult_0_l (bpow radix2 (Zneg p))) in H1.
+ apply Rmult_lt_reg_r in H1. apply (lt_Z2R _ 0) in H1.
+ destruct b; [split; [|ring_simplify];reflexivity|discriminate].
+ now apply bpow_gt_0.
+ apply Rmult_le_reg_r in H1. apply (le_Z2R 0) in H1.
+ destruct b; [destruct H1|split; [|ring_simplify]]; reflexivity.
+ now apply (bpow_gt_0 radix2 (Zneg p)).
+ unfold canonic_exp, FIX_exp; replace 0 with (Zneg p + Zpos p) by apply Zplus_opp_r.
+ apply (inbetween_float_new_location radix2 _ _ _ _ (Zpos p)); [reflexivity|].
+ apply inbetween_Exact; unfold B2R, F2R, Fnum, Fexp; destruct b.
+ rewrite Rabs_left; [simpl; ring_simplify; reflexivity|].
+ replace 0%R with (0*(bpow radix2 (Zneg p)))%R by ring; apply Rmult_gt_compat_r.
+ now apply bpow_gt_0.
+ apply (Z2R_lt _ 0); reflexivity.
+ apply Rabs_right; replace 0%R with (0*(bpow radix2 (Zneg p)))%R by ring; apply Rgt_ge.
+ apply Rmult_gt_compat_r; [now apply bpow_gt_0|apply (Z2R_lt 0); reflexivity].
+Qed.
+
+Theorem intoffloat_correct:
+ forall f,
+ match intoffloat f with
+ | Some n =>
+ is_finite _ _ f = true /\
+ Z2R (Int.signed n) = round radix2 (FIX_exp 0) (round_mode mode_ZR) (B2R _ _ f)
+ | None =>
+ is_finite _ _ f = false \/
+ (B2R _ _ f <= Z2R (Zpred Int.min_signed)\/
+ Z2R (Zsucc Int.max_signed) <= B2R _ _ f)%R
+ end.
+Proof.
+ intro; pose proof (Zoffloat_correct f); unfold intoffloat; destruct (Zoffloat f).
+ pose proof (Zle_bool_spec Int.min_signed z); pose proof (Zle_bool_spec z Int.max_signed).
+ compute_this Int.min_signed; compute_this Int.max_signed; destruct H.
+ inversion H0; [inversion H1|].
+ rewrite <- (Int.signed_repr z) in H2 by smart_omega; split; assumption.
+ right; right; eapply Rle_trans; [apply Z2R_le; apply Zlt_le_succ; now apply H6|].
+ rewrite H2, round_ZR_pos.
+ unfold round, scaled_mantissa, canonic_exp, FIX_exp, F2R, Fnum, Fexp; simpl bpow.
+ do 2 rewrite Rmult_1_r; now apply Zfloor_lb.
+ apply Rnot_lt_le; intro; apply Rlt_le in H7; apply (round_le radix2 (FIX_exp 0) (round_mode mode_ZR)) in H7;
+ rewrite <- H2, round_0 in H7; [apply (le_Z2R _ 0) in H7; now smart_omega|now apply valid_rnd_round_mode].
+ right; left; eapply Rle_trans; [|apply (Z2R_le z); simpl; omega].
+ rewrite H2, round_ZR_neg.
+ unfold round, scaled_mantissa, canonic_exp, FIX_exp, F2R, Fnum, Fexp; simpl bpow.
+ do 2 rewrite Rmult_1_r; now apply Zceil_ub.
+ apply Rnot_lt_le; intro; apply Rlt_le in H5; apply (round_le radix2 (FIX_exp 0) (round_mode mode_ZR)) in H5.
+ rewrite <- H2, round_0 in H5; [apply (le_Z2R 0) in H5; omega|now apply valid_rnd_round_mode].
+ left; assumption.
+Qed.
+
+Theorem intuoffloat_correct:
+ forall f,
+ match intuoffloat f with
+ | Some n =>
+ is_finite _ _ f = true /\
+ Z2R (Int.unsigned n) = round radix2 (FIX_exp 0) (round_mode mode_ZR) (B2R _ _ f)
+ | None =>
+ is_finite _ _ f = false \/
+ (B2R _ _ f <= -1 \/
+ Z2R (Zsucc Int.max_unsigned) <= B2R _ _ f)%R
+ end.
+Proof.
+ intro; pose proof (Zoffloat_correct f); unfold intuoffloat; destruct (Zoffloat f).
+ pose proof (Zle_bool_spec 0 z); pose proof (Zle_bool_spec z Int.max_unsigned).
+ compute_this Int.max_unsigned; destruct H.
+ inversion H0. inversion H1.
+ rewrite <- (Int.unsigned_repr z) in H2 by smart_omega; split; assumption.
+ right; right; eapply Rle_trans; [apply Z2R_le; apply Zlt_le_succ; now apply H6|].
+ rewrite H2, round_ZR_pos.
+ unfold round, scaled_mantissa, canonic_exp, FIX_exp, F2R, Fnum, Fexp; simpl bpow;
+ do 2 rewrite Rmult_1_r; now apply Zfloor_lb.
+ apply Rnot_lt_le; intro; apply Rlt_le in H7; eapply (round_le radix2 (FIX_exp 0) (round_mode mode_ZR)) in H7;
+ rewrite <- H2, round_0 in H7; [apply (le_Z2R _ 0) in H7; now smart_omega|now apply valid_rnd_round_mode].
+ right; left; eapply Rle_trans; [|change (-1)%R with (Z2R (-1)); apply (Z2R_le z); omega].
+ rewrite H2, round_ZR_neg; unfold round, scaled_mantissa, canonic_exp, FIX_exp, F2R, Fnum, Fexp; simpl bpow.
+ do 2 rewrite Rmult_1_r; now apply Zceil_ub.
+ apply Rnot_lt_le; intro; apply Rlt_le in H5; apply (round_le radix2 (FIX_exp 0) (round_mode mode_ZR)) in H5;
+ rewrite <- H2, round_0 in H5; [apply (le_Z2R 0) in H5; omega|now apply valid_rnd_round_mode].
+ left; assumption.
+Qed.
+
+Lemma intuoffloat_interval:
+ forall f n,
+ intuoffloat f = Some n ->
+ (-1 < B2R _ _ f < Z2R (Zsucc Int.max_unsigned))%R.
+Proof.
+ intro; pose proof (intuoffloat_correct f); destruct (intuoffloat f); try discriminate; destruct H.
+ destruct f; try discriminate; intros.
+ simpl B2R; change 0%R with (Z2R 0); change (-1)%R with (Z2R (-1)); split; apply Z2R_lt; reflexivity.
+ pose proof (Int.unsigned_range i).
+ unfold round, scaled_mantissa, B2R, F2R, Fnum, Fexp in H0 |- *; simpl bpow in H0; do 2 rewrite Rmult_1_r in H0;
+ apply eq_Z2R in H0.
+ split; apply Rnot_le_lt; intro.
+ rewrite Ztrunc_ceil in H0;
+ [apply Zceil_le in H3; change (-1)%R with (Z2R (-1)) in H3; rewrite Zceil_Z2R in H3; omega|].
+ eapply Rle_trans; [now apply H3|apply (Z2R_le (-1) 0); discriminate].
+ rewrite Ztrunc_floor in H0; [apply Zfloor_le in H3; rewrite Zfloor_Z2R in H3; now smart_omega|].
+ eapply Rle_trans; [|now apply H3]; apply (Z2R_le 0); discriminate.
+Qed.
+
+Theorem intuoffloat_intoffloat_1:
forall x n,
cmp Clt x (floatofintu ox8000_0000) = true ->
intuoffloat x = Some n ->
intoffloat x = Some n.
+Proof.
+ intros; unfold cmp in H; pose proof (order_float_finite_correct x (floatofintu ox8000_0000)).
+ destruct (order_float x (floatofintu ox8000_0000)); try destruct c; try discriminate.
+ pose proof (intuoffloat_correct x); rewrite H0 in H2; destruct H2.
+ specialize (H1 H2 eq_refl); pose proof (intoffloat_correct x); destruct (intoffloat x).
+ f_equal; rewrite <- (proj2 H4) in H3; apply eq_Z2R in H3.
+ pose proof (eq_refl (Int.repr (Int.unsigned n))); rewrite H3 in H5 at 1.
+ rewrite Int.repr_signed, Int.repr_unsigned in H5; assumption.
+ destruct H4; [rewrite H2 in H4; discriminate|].
+ apply intuoffloat_interval in H0; exfalso; destruct H0, H4.
+ eapply Rlt_le_trans in H0; [|now apply H4]; apply (lt_Z2R (-1)) in H0; discriminate.
+ apply Rcompare_Lt_inv in H1; eapply Rle_lt_trans in H1; [|now apply H4].
+ unfold floatofintu in H1; rewrite (fun x H => proj1 (binary_normalize64_exact x H)) in H1;
+ [apply lt_Z2R in H1; discriminate|split; reflexivity].
+Qed.
+
+Lemma Zfloor_minus :
+ forall x n, Zfloor(x-Z2R n) = Zfloor(x)-n.
+Proof.
+ intros; apply Zfloor_imp; replace (Zfloor x - n + 1) with (Zfloor x + 1 - n) by ring; do 2 rewrite Z2R_minus.
+ split;
+ [apply Rplus_le_compat_r; now apply Zfloor_lb|
+ apply Rplus_lt_compat_r; rewrite Z2R_plus; now apply Zfloor_ub].
+Qed.
-Axiom intuoffloat_intoffloat_2:
+Theorem intuoffloat_intoffloat_2:
forall x n,
cmp Clt x (floatofintu ox8000_0000) = false ->
intuoffloat x = Some n ->
intoffloat (sub x (floatofintu ox8000_0000)) = Some (Int.sub n ox8000_0000).
+Proof.
+ assert (B2R _ _ (floatofintu ox8000_0000) = Z2R (Int.unsigned ox8000_0000)).
+ apply (fun x H => proj1 (binary_normalize64_exact x H)); split; reflexivity.
+ intros; unfold cmp in H0; pose proof (order_float_finite_correct x (floatofintu ox8000_0000)).
+ destruct (order_float x (floatofintu ox8000_0000)); try destruct c; try discriminate;
+ pose proof (intuoffloat_correct x); rewrite H1 in H3; destruct H3; specialize (H2 H3 eq_refl).
+ apply Rcompare_Eq_inv in H2; apply B2R_inj in H2.
+ subst x; vm_compute in H1; injection H1; intro; subst n; vm_compute; reflexivity.
+ destruct x; try discriminate H3;
+ [rewrite H in H2; simpl B2R in H2; apply (eq_Z2R 0) in H2; discriminate|reflexivity].
+ reflexivity.
+ rewrite H in H2; apply Rcompare_Gt_inv in H2; pose proof (intuoffloat_interval _ _ H1).
+ unfold sub, b64_minus.
+ exploit (Bminus_correct 53 1024 eq_refl eq_refl mode_NE x (floatofintu ox8000_0000)); [assumption|reflexivity|]; intro.
+ rewrite H, round_generic in H6.
+ match goal with [H6:if Rlt_bool ?x ?y then _ else _|-_] =>
+ pose proof (Rlt_bool_spec x y); destruct (Rlt_bool x y) end.
+ destruct H6.
+ match goal with [|- _ ?y = _] => pose proof (intoffloat_correct y); destruct (intoffloat y) end.
+ destruct H9.
+ f_equal; rewrite <- (Int.repr_signed i); unfold Int.sub; f_equal; apply eq_Z2R.
+ rewrite Z2R_minus, H10, H4.
+ unfold round, scaled_mantissa, F2R, Fexp, Fnum, round_mode; simpl bpow; repeat rewrite Rmult_1_r;
+ rewrite <- Z2R_minus; f_equal.
+ rewrite (Ztrunc_floor (B2R _ _ x)), <- Zfloor_minus, <- Ztrunc_floor;
+ [f_equal; assumption|apply Rle_0_minus; left; assumption|].
+ left; eapply Rlt_trans; [|now apply H2]; apply (Z2R_lt 0); reflexivity.
+ exfalso; simpl Zcompare in H6, H8; rewrite H6, H8 in H9.
+ destruct H9 as [|[]]; [discriminate|..].
+ eapply Rle_trans in H9; [|apply Rle_0_minus; left; assumption]; apply (le_Z2R 0) in H9; apply H9; reflexivity.
+ eapply Rle_lt_trans in H9; [|apply Rplus_lt_compat_r; now apply (proj2 H5)].
+ rewrite <- Z2R_opp, <- Z2R_plus in H9; apply lt_Z2R in H9; discriminate.
+ exfalso; inversion H7; rewrite Rabs_right in H8.
+ eapply Rle_lt_trans in H8. apply Rle_not_lt in H8; [assumption|apply (bpow_le _ 31); discriminate].
+ change (bpow radix2 31) with (Z2R(Zsucc Int.max_unsigned - Int.unsigned ox8000_0000)); rewrite Z2R_minus.
+ apply Rplus_lt_compat_r; exact (proj2 H5).
+ apply Rle_ge; apply Rle_0_minus; left; assumption.
+ now apply valid_rnd_round_mode.
+ apply Fprop_Sterbenz.sterbenz_aux; [now apply fexp_monotone|now apply generic_format_B2R| |].
+ rewrite <- H; now apply generic_format_B2R.
+ destruct H5; split; left; assumption.
+ now destruct H2.
+Qed.
(** Conversions from ints to floats can be defined as bitwise manipulations
over the in-memory representation. This is what the PowerPC port does.
@@ -153,15 +694,147 @@ Axiom intuoffloat_intoffloat_2:
Definition ox4330_0000 := Int.repr 1127219200. (**r [0x4330_0000] *)
-Axiom floatofintu_from_words:
+Lemma split_bits_or:
+ forall x,
+ split_bits 52 11
+ (Int64.unsigned
+ (Int64.or
+ (Int64.shl (Int64.repr (Int.unsigned ox4330_0000)) (Int64.repr 32))
+ (Int64.repr (Int.unsigned x)))) = (false, Int.unsigned x, 1075).
+Proof.
+intro; pose proof (Int.unsigned_range x); unfold split_bits; f_equal; f_equal.
+apply Zle_bool_false.
+match goal with [|-_ ?x < _] =>
+ pose proof (Int64.sign_bit_of_Z x);
+ destruct (zlt (Int64.unsigned x) Int64.half_modulus)
+end.
+assumption.
+unfold Int64.or, Int64.bitwise_binop in H0.
+rewrite Int64.unsigned_repr, Int64.bits_of_Z_of_bits in H0.
+rewrite orb_false_intro in H0; [discriminate|reflexivity|].
+rewrite Int64.sign_bit_of_Z.
+match goal with [|- ((if ?c then _ else _) = _)] => destruct c end.
+reflexivity.
+rewrite Int64.unsigned_repr in z0; [exfalso|]; now smart_omega.
+vm_compute; split; congruence.
+now apply Int64.Z_of_bits_range_2.
+change (2^52) with (two_p 52).
+rewrite <- Int64.zero_ext_mod, Int64.zero_ext_and by (vm_compute; intuition).
+rewrite Int64.and_commut, Int64.and_or_distrib.
+match goal with [|- _ (_ ?o _) = _] => change o with Int64.zero end.
+rewrite Int64.or_commut, Int64.or_zero, Int64.and_commut.
+rewrite <- Int64.zero_ext_and, Int64.zero_ext_mod by (compute; intuition).
+rewrite Int64.unsigned_repr; [apply Zmod_small; compute_this (two_p 52)|]; now smart_omega.
+match goal with [|- ?x mod _ = _] => rewrite <- (Int64.unsigned_repr x) end.
+change (2^52) with (two_p (Int64.unsigned (Int64.repr 52))). rewrite <- Int64.shru_div_two_p.
+change (2^11) with (two_p 11). rewrite <- Int64.or_shru.
+replace (Int64.shru (Int64.repr (Int.unsigned x)) (Int64.repr 52)) with Int64.zero;
+ [vm_compute; reflexivity|].
+rewrite Int64.shru_div_two_p, Int64.unsigned_repr by smart_omega.
+unfold Int64.zero; f_equal; rewrite Zdiv_small; [reflexivity|].
+simpl; compute_this (two_power_pos 52); now smart_omega.
+apply Zdiv_interval_2; try smart_omega; now apply Int64.unsigned_range_2.
+Qed.
+
+Lemma from_words_value:
+ forall x,
+ B2R _ _ (from_words ox4330_0000 x) =
+ (bpow radix2 52 + Z2R (Int.unsigned x))%R /\
+ is_finite _ _ (from_words ox4330_0000 x) = true.
+Proof.
+ intros; unfold from_words, double_of_bits, b64_of_bits, binary_float_of_bits.
+ rewrite B2R_FF2B; unfold is_finite; rewrite match_FF2B;
+ unfold binary_float_of_bits_aux; rewrite split_bits_or; simpl; pose proof (Int.unsigned_range x).
+ destruct (Int.unsigned x + Zpower_pos 2 52) as []_eqn.
+ exfalso; now smart_omega.
+ simpl; rewrite <- Heqz; unfold F2R; simpl.
+ rewrite <- (Z2R_plus 4503599627370496), Rmult_1_r.
+ split; [f_equal; compute_this (Zpower_pos 2 52); ring | reflexivity].
+ assert (Zneg p < 0) by reflexivity.
+ exfalso; now smart_omega.
+Qed.
+
+Theorem floatofintu_from_words:
forall x,
floatofintu x =
sub (from_words ox4330_0000 x) (from_words ox4330_0000 Int.zero).
+Proof.
+ intros; destruct (Int.eq_dec x Int.zero); [subst; vm_compute; reflexivity|].
+ assert (Int.unsigned x <> 0).
+ intro; destruct n; rewrite <- (Int.repr_unsigned x), H; reflexivity.
+ pose proof (Int.unsigned_range x).
+ pose proof (binary_normalize64_exact (Int.unsigned x)). destruct H1; [smart_omega|].
+ unfold floatofintu, sub, b64_minus.
+ match goal with [|- _ = Bminus _ _ _ _ _ ?x ?y] =>
+ pose proof (Bminus_correct 53 1024 eq_refl eq_refl mode_NE x y) end.
+ apply (fun f x y => f x y) in H3; try apply (fun x => proj2 (from_words_value x)).
+ do 2 rewrite (fun x => proj1 (from_words_value x)) in H3.
+ replace (bpow radix2 52 + Z2R (Int.unsigned x) -
+ (bpow radix2 52 + Z2R (Int.unsigned Int.zero)))%R with (Z2R (Int.unsigned x)) in H3 by (simpl; ring).
+ rewrite round_exact in H3 by smart_omega.
+ match goal with [H3:if Rlt_bool ?x ?y then _ else _ |- _] =>
+ pose proof (Rlt_bool_spec x y); destruct (Rlt_bool x y) end; destruct H3.
+ simpl Zcompare in *; apply B2R_inj;
+ try match goal with [H':B2R _ _ ?f = _ , H'':is_finite _ _ ?f = true |- is_finite_strict _ _ ?f = true] =>
+ destruct f; [
+ simpl in H'; change 0%R with (Z2R 0) in H'; apply eq_Z2R in H'; now destruct (H (eq_sym H')) |
+ discriminate H'' | discriminate H'' | reflexivity
+ ]
+ end.
+ rewrite H3; assumption.
+ inversion H4; change (bpow radix2 1024) with (Z2R (radix2 ^ 1024)) in H6; rewrite <- Z2R_abs in H6.
+ apply le_Z2R in H6; pose proof (Zabs_spec (Int.unsigned x));
+ exfalso; now smart_omega.
+Qed.
-Axiom floatofint_from_words:
+Lemma ox8000_0000_signed_unsigned:
+ forall x,
+ Int.unsigned (Int.add x ox8000_0000) = Int.signed x + Int.half_modulus.
+Proof.
+ intro; unfold Int.signed, Int.add; pose proof (Int.unsigned_range x).
+ destruct (zlt (Int.unsigned x) Int.half_modulus).
+ rewrite Int.unsigned_repr; compute_this (Int.unsigned ox8000_0000); now smart_omega.
+ rewrite (Int.eqm_samerepr _ (Int.unsigned x + -2147483648)).
+ rewrite Int.unsigned_repr; now smart_omega.
+ apply Int.eqm_add; [now apply Int.eqm_refl|exists 1;reflexivity].
+Qed.
+
+Theorem floatofint_from_words:
forall x,
floatofint x =
sub (from_words ox4330_0000 (Int.add x ox8000_0000))
(from_words ox4330_0000 ox8000_0000).
+Proof.
+ intros; destruct (Int.eq_dec x Int.zero); [subst; vm_compute; reflexivity|].
+ assert (Int.signed x <> 0).
+ intro; destruct n; rewrite <- (Int.repr_signed x), H; reflexivity.
+ pose proof (Int.signed_range x).
+ pose proof (binary_normalize64_exact (Int.signed x)); destruct H1; [now smart_omega|].
+ unfold floatofint, sub, b64_minus.
+ match goal with [|- _ = Bminus _ _ _ _ _ ?x ?y] =>
+ pose proof (Bminus_correct 53 1024 eq_refl eq_refl mode_NE x y) end.
+ apply (fun f x y => f x y) in H3; try apply (fun x => proj2 (from_words_value x)).
+ do 2 rewrite (fun x => proj1 (from_words_value x)) in H3.
+ replace (bpow radix2 52 + Z2R (Int.unsigned (Int.add x ox8000_0000)) -
+ (bpow radix2 52 + Z2R (Int.unsigned ox8000_0000)))%R with (Z2R (Int.signed x)) in H3
+ by (rewrite ox8000_0000_signed_unsigned; rewrite Z2R_plus; simpl; ring).
+ rewrite round_exact in H3 by smart_omega.
+ match goal with [H3:if Rlt_bool ?x ?y then _ else _ |- _] =>
+ pose proof (Rlt_bool_spec x y); destruct (Rlt_bool x y) end; destruct H3.
+ simpl Zcompare in *; apply B2R_inj;
+ try match goal with [H':B2R _ _ ?f = _ , H'':is_finite _ _ ?f = true |- is_finite_strict _ _ ?f = true] =>
+ destruct f; [
+ simpl in H'; change 0%R with (Z2R 0) in H'; apply eq_Z2R in H'; now destruct (H (eq_sym H')) |
+ discriminate H'' | discriminate H'' | reflexivity
+ ]
+ end.
+ rewrite H3; assumption.
+ inversion H4; unfold bpow in H6; rewrite <- Z2R_abs in H6;
+ apply le_Z2R in H6; pose proof (Zabs_spec (Int.signed x)); exfalso; now smart_omega.
+Qed.
+
+Global Opaque
+ zero eq_dec neg abs singleoffloat intoffloat intuoffloat floatofint floatofintu
+ add sub mul div cmp bits_of_double double_of_bits bits_of_single single_of_bits from_words.
End Float.
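The two theorems above verify the classic "magic constant" int-to-double conversion: packing a 32-bit integer x into the low word of a double whose high word is ox4330_0000 yields exactly 2^52 + x, so subtracting the double whose low word is zero (2^52 itself) recovers x as a binary64 value. The following C sketch is an illustration only, not part of the commit, and assumes doubles are IEEE-754 binary64 as on the supported targets:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Reconstruct a double from its high and low 32-bit words. */
static double from_words(uint32_t hi, uint32_t lo) {
  uint64_t bits = ((uint64_t)hi << 32) | lo;
  double d;
  memcpy(&d, &bits, sizeof d);   /* reinterpret the bit pattern as binary64 */
  return d;
}

/* The identity proved by floatofintu_from_words: 2^52 + x exactly, minus 2^52. */
static double floatofintu(uint32_t x) {
  return from_words(0x43300000u, x) - from_words(0x43300000u, 0);
}

int main(void) {
  printf("%.1f %.1f %.1f\n",
         floatofintu(0u), floatofintu(1u), floatofintu(0xffffffffu));
  /* prints: 0.0 1.0 4294967295.0 */
  return 0;
}

For the signed case, ox8000_0000_signed_unsigned shows that Int.unsigned (Int.add x ox8000_0000) equals Int.signed x + Int.half_modulus, so floatofint_from_words applies the same identity to x biased by 2^31; the second from_words operand carries the same bias, and the subtraction cancels it, leaving Int.signed x exactly.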
diff --git a/pg b/pg
index d63be34..9242303 100755
--- a/pg
+++ b/pg
@@ -15,7 +15,7 @@ make -q ${1}o || {
done)
}
COQPROGNAME="coqtop"
-COQPROGARGS="\"-I\" \"$PWD/lib\" \"-I\" \"$PWD/common\" \"-I\" \"$VARIANT\" \"-I\" \"$ARCH\" \"-I\" \"$PWD/backend\" \"-I\" \"$PWD/cfrontend\""
+COQPROGARGS="\"-I\" \"$PWD/lib\" \"-I\" \"$PWD/common\" \"-I\" \"$VARIANT\" \"-I\" \"$ARCH\" \"-I\" \"$PWD/backend\" \"-I\" \"$PWD/cfrontend\" \"-I\" \"$PWD/flocq\" \"-I\" \"$PWD/flocq/Appli\" \"-I\" \"$PWD/flocq/Calc\" \"-I\" \"$PWD/flocq/Core\" \"-I\" \"$PWD/flocq/Prop\""
emacs --eval "(setq coq-prog-name \"$COQPROGNAME\")" \
diff --git a/powerpc/PrintAsm.ml b/powerpc/PrintAsm.ml
index 3492de4..65d6ce0 100644
--- a/powerpc/PrintAsm.ml
+++ b/powerpc/PrintAsm.ml
@@ -669,8 +669,8 @@ let print_instruction oc tbl pc = function
| Plfi(r1, c) ->
let lbl = new_label() in
fprintf oc " addis %a, 0, %a\n" ireg GPR12 label_high lbl;
- fprintf oc " lfd %a, %a(%a) %s %.18g\n" freg r1 label_low lbl ireg GPR12 comment c;
- float_literals := (lbl, Int64.bits_of_float c) :: !float_literals;
+ fprintf oc " lfd %a, %a(%a) %s %.18g\n" freg r1 label_low lbl ireg GPR12 comment (camlfloat_of_coqfloat c);
+ float_literals := (lbl, camlint64_of_coqint (Floats.Float.bits_of_double c)) :: !float_literals;
| Plfs(r1, c, r2) ->
fprintf oc " lfs %a, %a(%a)\n" freg r1 constant c ireg r2
| Plfsx(r1, r2, r3) ->
@@ -1070,13 +1070,14 @@ let print_init oc = function
| Init_int32 n ->
fprintf oc " .long %ld\n" (camlint_of_coqint n)
| Init_float32 n ->
- fprintf oc " .long %ld %s %.18g\n" (Int32.bits_of_float n) comment n
+ fprintf oc " .long %ld %s %.18g\n" (camlint_of_coqint (Floats.Float.bits_of_single n))
+ comment (camlfloat_of_coqfloat n)
| Init_float64 n ->
- let b = Int64.bits_of_float n in
+ let b = camlint64_of_coqint (Floats.Float.bits_of_double n) in
fprintf oc " .long %Ld, %Ld %s %.18g\n"
(Int64.shift_right_logical b 32)
(Int64.logand b 0xFFFFFFFFL)
- comment n
+ comment (camlfloat_of_coqfloat n)
| Init_space n ->
let n = camlint_of_z n in
if n > 0l then fprintf oc " .space %ld\n" n
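In the Init_float64 case above, the 64-bit pattern now comes from Floats.Float.bits_of_double (computed on the Flocq-level representation) rather than Int64.bits_of_float, and is split into two 32-bit .long words. As a standalone illustration only (not part of the commit), the same split in C, which is the inverse of the double_of_bits helper introduced in test/c/floats.c further down:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void) {
  double d = 1.0;                              /* a float literal to emit */
  uint64_t b;
  memcpy(&b, &d, sizeof b);                    /* assumes IEEE-754 binary64 */
  uint32_t hi = (uint32_t)(b >> 32);           /* first emitted word */
  uint32_t lo = (uint32_t)(b & 0xFFFFFFFFu);   /* second emitted word */
  printf(".long %u, %u\n", (unsigned)hi, (unsigned)lo);
  /* prints: .long 1072693248, 0   i.e. 0x3ff00000 0x00000000 */
  return 0;
}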
diff --git a/powerpc/PrintOp.ml b/powerpc/PrintOp.ml
index 5bc4658..a2f9ff8 100644
--- a/powerpc/PrintOp.ml
+++ b/powerpc/PrintOp.ml
@@ -48,7 +48,7 @@ let print_condition reg pp = function
let print_operation reg pp = function
| Omove, [r1] -> reg pp r1
| Ointconst n, [] -> fprintf pp "%ld" (camlint_of_coqint n)
- | Ofloatconst n, [] -> fprintf pp "%F" n
+ | Ofloatconst n, [] -> fprintf pp "%F" (camlfloat_of_coqfloat n)
| Oaddrsymbol(id, ofs), [] ->
fprintf pp "\"%s\" + %ld" (extern_atom id) (camlint_of_coqint ofs)
| Oaddrstack ofs, [] ->
diff --git a/test/c/Makefile b/test/c/Makefile
index 2b9f4b7..5fd3f55 100644
--- a/test/c/Makefile
+++ b/test/c/Makefile
@@ -13,7 +13,9 @@ TIME=xtime -o /dev/null -mintime 1.0 # Xavier's hack
PROGS=fib integr qsort fft sha1 aes almabench lists \
binarytrees fannkuch knucleotide mandelbrot nbody \
nsieve nsievebits spectral vmach \
- bisect chomp perlin
+ bisect chomp perlin floats
+
+PROGS_INTERP=floats
all_s: $(PROGS:%=%.s)
@@ -37,6 +39,12 @@ test:
else echo "$$i: FAILED"; \
fi; \
done
+ @for i in $(PROGS_INTERP); do \
+ if $(CCOMP) -interp -quiet $$i.c | cmp -s - Results/$$i; \
+ then echo "$$i (interpreted): passed"; \
+ else echo "$$i (interpreted): FAILED"; \
+ fi; \
+ done
test_gcc:
@for i in $(PROGS); do \
diff --git a/test/c/Results/floats b/test/c/Results/floats
new file mode 100644
index 0000000..e5e16af
--- /dev/null
+++ b/test/c/Results/floats
@@ -0,0 +1 @@
+0 error(s) detected.
diff --git a/test/c/floats.c b/test/c/floats.c
new file mode 100644
index 0000000..a3514fb
--- /dev/null
+++ b/test/c/floats.c
@@ -0,0 +1,7944 @@
+#include<stdio.h>
+
+#define STR_EXPAND(tok) #tok
+#define STR(tok) STR_EXPAND(tok)
+
+#if defined(__ppc__) || defined(__PPC__)
+#define ARCH_BIG_ENDIAN
+#elif defined(__i386__) || defined(__x86_64__) || defined(__ARMEL__)
+#undef ARCH_BIG_ENDIAN
+#else
+#error "unknown endianness"
+#endif
+
+union converter64 {
+ double dbl;
+ struct {
+#ifdef ARCH_BIG_ENDIAN
+ unsigned h, l;
+#else
+ unsigned l, h;
+#endif
+ } u64;
+};
+
+union converter32 { float f; unsigned u; };
+
+double double_of_bits(unsigned high, unsigned low) {
+ union converter64 c;
+ c.u64.l = low;
+ c.u64.h = high;
+ return c.dbl;
+}
+
+float single_of_bits(unsigned u) {
+ union converter32 c;
+ c.u = u;
+ return c.f;
+}
+
+int num_errors = 0;
+
+void comp64(double d, unsigned high, unsigned low, char* s) {
+ union converter64 c;
+ c.dbl = d;
+ if((c.u64.h & 0x7FF00000) == 0x7FF00000 && (c.u64.l | (c.u64.h & 0xFFFFF)) != 0) {
+ c.u64.l = 0xFFFFFFFF;
+ c.u64.h = 0x7FFFFFFF;
+ }
+ if((high & 0x7FF00000) == 0x7FF00000 && (low | (high & 0xFFFFF)) != 0) {
+ low = 0xFFFFFFFF;
+ high = 0x7FFFFFFF;
+ }
+ if(low != c.u64.l || high != c.u64.h) {
+ printf("%s : %08x %08x (%a)\n", s, c.u64.h, c.u64.l, d);
+ num_errors++;
+ }
+}
+
+void comp32(float f, unsigned u, char* s) {
+ union converter32 c;
+ c.f = f;
+ if((c.u & 0x78000000) == 0x78000000 && (c.u & 0x07FFFFFF) != 0)
+ c.u = 0x7FFFFFFF;
+ if((u & 0x78000000) == 0x78000000 && (u & 0x07FFFFFF) != 0)
+ u = 0x7FFFFFFF;
+ if(u != c.u) {
+ printf("%s : %08x (%a)\n", s, c.u, (double)f);
+ num_errors++;
+ }
+}
+
+void compi(unsigned u1, unsigned u2, char* s) {
+ if(u1 != u2) {
+ printf("%s : %08x\n", s, u1);
+ num_errors++;
+ }
+}
+
+void f0(void) {
+ comp64((signed)0x00000001, 0x3ff00000, 0x00000000, STR(__LINE__)); // (signed)0x00000001 = 0x1p+0
+ comp64((signed)0x00000000, 0x00000000, 0x00000000, STR(__LINE__)); // (signed)0x00000000 = 0x0p+0
+ comp64((signed)0x00000002, 0x40000000, 0x00000000, STR(__LINE__)); // (signed)0x00000002 = 0x1p+1
+ comp64((signed)0x00000003, 0x40080000, 0x00000000, STR(__LINE__)); // (signed)0x00000003 = 0x1.8p+1
+ comp64((signed)0x00000010, 0x40300000, 0x00000000, STR(__LINE__)); // (signed)0x00000010 = 0x1p+4
+ comp64((signed)0x00000100, 0x40700000, 0x00000000, STR(__LINE__)); // (signed)0x00000100 = 0x1p+8
+ comp64((signed)0x00010001, 0x40f00010, 0x00000000, STR(__LINE__)); // (signed)0x00010001 = 0x1.0001p+16
+ comp64((signed)0x0000ffff, 0x40efffe0, 0x00000000, STR(__LINE__)); // (signed)0x0000ffff = 0x1.fffep+15
+ comp64((signed)0x00ffffff, 0x416fffff, 0xe0000000, STR(__LINE__)); // (signed)0x00ffffff = 0x1.fffffep+23
+ comp64((signed)0xffffffff, 0xbff00000, 0x00000000, STR(__LINE__)); // (signed)0xffffffff = -0x1p+0
+}
+
+void f1(void) {
+ comp64((signed)0xfffffff0, 0xc0300000, 0x00000000, STR(__LINE__)); // (signed)0xfffffff0 = -0x1p+4
+ comp64((signed)0x80000000, 0xc1e00000, 0x00000000, STR(__LINE__)); // (signed)0x80000000 = -0x1p+31
+ comp64(single_of_bits(0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 = 0x0p+0
+ comp64(single_of_bits(0x80000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 = -0x0p+0
+ comp64(single_of_bits(0x7fc00000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan = nan
+ comp64(single_of_bits(0xffc00000), 0xfff80000, 0x00000000, STR(__LINE__)); // -nan = -nan
+ comp64(single_of_bits(0x7f800000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf = inf
+ comp64(single_of_bits(0xff800000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf = -inf
+ comp64(single_of_bits(0x3f800000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 = 0x1p+0
+ comp64(single_of_bits(0x40000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+1 = 0x1p+1
+}
+
+void f2(void) {
+ comp64(single_of_bits(0x40400000), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 = 0x1.8p+1
+ comp64(single_of_bits(0x41800000), 0x40300000, 0x00000000, STR(__LINE__)); // 0x1p+4 = 0x1p+4
+ comp64(single_of_bits(0x41880000), 0x40310000, 0x00000000, STR(__LINE__)); // 0x1.1p+4 = 0x1.1p+4
+ comp64(single_of_bits(0x43800000), 0x40700000, 0x00000000, STR(__LINE__)); // 0x1p+8 = 0x1p+8
+ comp64(single_of_bits(0x47800080), 0x40f00010, 0x00000000, STR(__LINE__)); // 0x1.0001p+16 = 0x1.0001p+16
+ comp64(single_of_bits(0x477fff00), 0x40efffe0, 0x00000000, STR(__LINE__)); // 0x1.fffep+15 = 0x1.fffep+15
+ comp64(single_of_bits(0xbf800000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 = -0x1p+0
+ comp64(single_of_bits(0xcf000000), 0xc1e00000, 0x00000000, STR(__LINE__)); // -0x1p+31 = -0x1p+31
+ comp64(single_of_bits(0xdf000000), 0xc3e00000, 0x00000000, STR(__LINE__)); // -0x1p+63 = -0x1p+63
+ comp64(single_of_bits(0x7f000000), 0x47e00000, 0x00000000, STR(__LINE__)); // 0x1p+127 = 0x1p+127
+}
+
+void f3(void) {
+ comp64(single_of_bits(0xfe800000), 0xc7d00000, 0x00000000, STR(__LINE__)); // -0x1p+126 = -0x1p+126
+ comp64(single_of_bits(0x00800000), 0x38100000, 0x00000000, STR(__LINE__)); // 0x1p-126 = 0x1p-126
+ comp64((unsigned)0x00000001, 0x3ff00000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000001 = 0x1p+0
+ comp64((unsigned)0x00000000, 0x00000000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000000 = 0x0p+0
+ comp64((unsigned)0x00000002, 0x40000000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000002 = 0x1p+1
+ comp64((unsigned)0x00000003, 0x40080000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000003 = 0x1.8p+1
+ comp64((unsigned)0x00000010, 0x40300000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000010 = 0x1p+4
+ comp64((unsigned)0x00000100, 0x40700000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000100 = 0x1p+8
+ comp64((unsigned)0x00010001, 0x40f00010, 0x00000000, STR(__LINE__)); // (unsigned)0x00010001 = 0x1.0001p+16
+ comp64((unsigned)0x0000ffff, 0x40efffe0, 0x00000000, STR(__LINE__)); // (unsigned)0x0000ffff = 0x1.fffep+15
+}
+
+void f4(void) {
+ comp64((unsigned)0x00ffffff, 0x416fffff, 0xe0000000, STR(__LINE__)); // (unsigned)0x00ffffff = 0x1.fffffep+23
+ comp64((unsigned)0x80000000, 0x41e00000, 0x00000000, STR(__LINE__)); // (unsigned)0x80000000 = 0x1p+31
+ compi((signed)double_of_bits(0x00000000, 0x00000000), 0x00000000, STR(__LINE__)); // (signed)0x0p+0 = 00000000
+ compi((signed)double_of_bits(0x3ff00000, 0x00000000), 0x00000001, STR(__LINE__)); // (signed)0x1p+0 = 00000001
+ compi((signed)double_of_bits(0x40000000, 0x00000000), 0x00000002, STR(__LINE__)); // (signed)0x1p+1 = 00000002
+ compi((signed)double_of_bits(0x40080000, 0x00000000), 0x00000003, STR(__LINE__)); // (signed)0x1.8p+1 = 00000003
+ compi((signed)double_of_bits(0x40300000, 0x00000000), 0x00000010, STR(__LINE__)); // (signed)0x1p+4 = 00000010
+ compi((signed)double_of_bits(0x40700000, 0x00000000), 0x00000100, STR(__LINE__)); // (signed)0x1p+8 = 00000100
+ compi((signed)double_of_bits(0x40f00010, 0x00000000), 0x00010001, STR(__LINE__)); // (signed)0x1.0001p+16 = 00010001
+ compi((signed)double_of_bits(0x40efffe0, 0x00000000), 0x0000ffff, STR(__LINE__)); // (signed)0x1.fffep+15 = 0000ffff
+}
+
+void f5(void) {
+ compi((signed)double_of_bits(0xbff00000, 0x00000000), 0xffffffff, STR(__LINE__)); // (signed)-0x1p+0 = ffffffff
+ compi((signed)double_of_bits(0xc1e00000, 0x00000000), 0x80000000, STR(__LINE__)); // (signed)-0x1p+31 = 80000000
+ compi((signed)double_of_bits(0x41dfffff, 0xffc00000), 0x7fffffff, STR(__LINE__)); // (signed)0x1.fffffffcp+30 = 7fffffff
+ compi((signed)double_of_bits(0xc1e00000, 0x00000000), 0x80000000, STR(__LINE__)); // (signed)-0x1p+31 = 80000000
+ compi((signed)double_of_bits(0x00000000, 0x00000001), 0x00000000, STR(__LINE__)); // (signed)0x0.0000000000001p-1022 = 00000000
+ compi((signed)double_of_bits(0x80000000, 0x00000001), 0x00000000, STR(__LINE__)); // (signed)-0x0.0000000000001p-1022 = 00000000
+ compi((signed)double_of_bits(0x00100000, 0x00000000), 0x00000000, STR(__LINE__)); // (signed)0x1p-1022 = 00000000
+ compi((signed)double_of_bits(0x80100000, 0x00000000), 0x00000000, STR(__LINE__)); // (signed)-0x1p-1022 = 00000000
+ compi((signed)double_of_bits(0x40240000, 0x00000001), 0x0000000a, STR(__LINE__)); // (signed)0x1.4000000000001p+3 = 0000000a
+ compi((signed)double_of_bits(0xc0240000, 0x00000001), 0xfffffff6, STR(__LINE__)); // (signed)-0x1.4000000000001p+3 = fffffff6
+}
+
+void f6(void) {
+ compi((signed)double_of_bits(0x3fefffff, 0xffffffff), 0x00000000, STR(__LINE__)); // (signed)0x1.fffffffffffffp-1 = 00000000
+ compi((signed)double_of_bits(0xbfefffff, 0xffffffff), 0x00000000, STR(__LINE__)); // (signed)-0x1.fffffffffffffp-1 = 00000000
+ compi((signed)double_of_bits(0x3fffffff, 0xffffffff), 0x00000001, STR(__LINE__)); // (signed)0x1.fffffffffffffp+0 = 00000001
+ compi((signed)double_of_bits(0xbfffffff, 0xffffffff), 0xffffffff, STR(__LINE__)); // (signed)-0x1.fffffffffffffp+0 = ffffffff
+ compi((signed)double_of_bits(0x3ff80000, 0x00000000), 0x00000001, STR(__LINE__)); // (signed)0x1.8p+0 = 00000001
+ compi((signed)double_of_bits(0xbff80000, 0x00000000), 0xffffffff, STR(__LINE__)); // (signed)-0x1.8p+0 = ffffffff
+ compi((signed)double_of_bits(0x40040000, 0x00000000), 0x00000002, STR(__LINE__)); // (signed)0x1.4p+1 = 00000002
+ compi((signed)double_of_bits(0xc0040000, 0x00000000), 0xfffffffe, STR(__LINE__)); // (signed)-0x1.4p+1 = fffffffe
+ compi((signed)double_of_bits(0x3ff80000, 0x00000001), 0x00000001, STR(__LINE__)); // (signed)0x1.8000000000001p+0 = 00000001
+ compi((signed)double_of_bits(0xbff80000, 0x00000001), 0xffffffff, STR(__LINE__)); // (signed)-0x1.8000000000001p+0 = ffffffff
+}
+
+void f7(void) {
+ compi((signed)double_of_bits(0x3ff7ffff, 0xffffffff), 0x00000001, STR(__LINE__)); // (signed)0x1.7ffffffffffffp+0 = 00000001
+ compi((signed)double_of_bits(0xbff7ffff, 0xffffffff), 0xffffffff, STR(__LINE__)); // (signed)-0x1.7ffffffffffffp+0 = ffffffff
+ compi((signed)double_of_bits(0x41d00000, 0x00400000), 0x40000001, STR(__LINE__)); // (signed)0x1.00000004p+30 = 40000001
+ compi((signed)double_of_bits(0x41700000, 0x00000001), 0x01000000, STR(__LINE__)); // (signed)0x1.0000000000001p+24 = 01000000
+ compi((signed)double_of_bits(0x416fffff, 0xffffffff), 0x00ffffff, STR(__LINE__)); // (signed)0x1.fffffffffffffp+23 = 00ffffff
+ compi((signed)double_of_bits(0x41600000, 0x00000001), 0x00800000, STR(__LINE__)); // (signed)0x1.0000000000001p+23 = 00800000
+ compi((signed)double_of_bits(0xc1600000, 0x00000001), 0xff800000, STR(__LINE__)); // (signed)-0x1.0000000000001p+23 = ff800000
+ compi((unsigned)double_of_bits(0x00000000, 0x00000000), 0x00000000, STR(__LINE__)); // (unsigned)0x0p+0 = 00000000
+ compi((unsigned)double_of_bits(0x3ff00000, 0x00000000), 0x00000001, STR(__LINE__)); // (unsigned)0x1p+0 = 00000001
+ compi((unsigned)double_of_bits(0x40000000, 0x00000000), 0x00000002, STR(__LINE__)); // (unsigned)0x1p+1 = 00000002
+}
+
+void f8(void) {
+ compi((unsigned)double_of_bits(0x40080000, 0x00000000), 0x00000003, STR(__LINE__)); // (unsigned)0x1.8p+1 = 00000003
+ compi((unsigned)double_of_bits(0x40300000, 0x00000000), 0x00000010, STR(__LINE__)); // (unsigned)0x1p+4 = 00000010
+ compi((unsigned)double_of_bits(0x40700000, 0x00000000), 0x00000100, STR(__LINE__)); // (unsigned)0x1p+8 = 00000100
+ compi((unsigned)double_of_bits(0x40f00010, 0x00000000), 0x00010001, STR(__LINE__)); // (unsigned)0x1.0001p+16 = 00010001
+ compi((unsigned)double_of_bits(0x40efffe0, 0x00000000), 0x0000ffff, STR(__LINE__)); // (unsigned)0x1.fffep+15 = 0000ffff
+ compi((unsigned)double_of_bits(0x41efffff, 0xffe00000), 0xffffffff, STR(__LINE__)); // (unsigned)0x1.fffffffep+31 = ffffffff
+ compi((unsigned)double_of_bits(0x00000000, 0x00000001), 0x00000000, STR(__LINE__)); // (unsigned)0x0.0000000000001p-1022 = 00000000
+ compi((unsigned)double_of_bits(0x00100000, 0x00000000), 0x00000000, STR(__LINE__)); // (unsigned)0x1p-1022 = 00000000
+ compi((unsigned)double_of_bits(0x40240000, 0x00000001), 0x0000000a, STR(__LINE__)); // (unsigned)0x1.4000000000001p+3 = 0000000a
+ compi((unsigned)double_of_bits(0x3fefffff, 0xffffffff), 0x00000000, STR(__LINE__)); // (unsigned)0x1.fffffffffffffp-1 = 00000000
+}
+
+void f9(void) {
+ compi((unsigned)double_of_bits(0x3fffffff, 0xffffffff), 0x00000001, STR(__LINE__)); // (unsigned)0x1.fffffffffffffp+0 = 00000001
+ compi((unsigned)double_of_bits(0x3ff80000, 0x00000000), 0x00000001, STR(__LINE__)); // (unsigned)0x1.8p+0 = 00000001
+ compi((unsigned)double_of_bits(0x40040000, 0x00000000), 0x00000002, STR(__LINE__)); // (unsigned)0x1.4p+1 = 00000002
+ compi((unsigned)double_of_bits(0x3ff80000, 0x00000001), 0x00000001, STR(__LINE__)); // (unsigned)0x1.8000000000001p+0 = 00000001
+ compi((unsigned)double_of_bits(0x3ff7ffff, 0xffffffff), 0x00000001, STR(__LINE__)); // (unsigned)0x1.7ffffffffffffp+0 = 00000001
+ compi((unsigned)double_of_bits(0x41e00000, 0x00200000), 0x80000001, STR(__LINE__)); // (unsigned)0x1.00000002p+31 = 80000001
+ compi((unsigned)double_of_bits(0x41700000, 0x00000001), 0x01000000, STR(__LINE__)); // (unsigned)0x1.0000000000001p+24 = 01000000
+ compi((unsigned)double_of_bits(0x416fffff, 0xffffffff), 0x00ffffff, STR(__LINE__)); // (unsigned)0x1.fffffffffffffp+23 = 00ffffff
+ compi((unsigned)double_of_bits(0x41600000, 0x00000001), 0x00800000, STR(__LINE__)); // (unsigned)0x1.0000000000001p+23 = 00800000
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1p+0 = 0x1p+1
+}
+
+void f10(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1p+0 = -0x1p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1p+1 = 0x1.8p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1p+0 = 0x1.8p+1
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1p+1 = -0x1.8p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1p+1 + -0x1p+0 = -0x1.8p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1p+1 = 0x1p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x401c0000, 0x00000000), 0x40200000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1.cp+2 = 0x1p+3
+ comp64(double_of_bits(0x401c0000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x40200000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 + 0x1p+0 = 0x1p+3
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc01c0000, 0x00000000), 0xc0200000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1.cp+2 = -0x1p+3
+ comp64(double_of_bits(0xc01c0000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc0200000, 0x00000000, STR(__LINE__)); // -0x1.cp+2 + -0x1p+0 = -0x1p+3
+}
+
+void f11(void) {
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 + -0x1p+0 = 0x1p+2
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1.4p+2 = 0x1p+2
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 + 0x1p+0 = -0x1p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xc0140000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1.4p+2 = -0x1p+2
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0xc0140000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1.4p+2 = -0x1.8p+1
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 + 0x1p+1 = -0x1.8p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1.4p+2 = 0x1.8p+1
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 + -0x1p+1 = 0x1.8p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0xc0100000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1p+2 = -0x1p+1
+ comp64(double_of_bits(0xc0100000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+2 + 0x1p+1 = -0x1p+1
+}
+
+void f12(void) {
+ comp64(double_of_bits(0x40100000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+2 + -0x1p+1 = 0x1p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0x40100000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1p+2 = 0x1p+1
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 + -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 + 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 + 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 + -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x000fffff, 0xffffffff), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0p+0 + 0x0.fffffffffffffp-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + 0x0p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x000fffff, 0xffffffff), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x0p+0 + 0x0.fffffffffffffp-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + -0x0p+0 = 0x0.fffffffffffffp-1022
+}
+
+void f13(void) {
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x800fffff, 0xffffffff), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x0p+0 + -0x0.fffffffffffffp-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + 0x0p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x800fffff, 0xffffffff), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0p+0 + -0x0.fffffffffffffp-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + -0x0p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000003p-1022 + 0x0p+0 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0p+0 + 0x0.0000000000003p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000003p-1022 + -0x0p+0 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x00000000, 0x00000003, STR(__LINE__)); // -0x0p+0 + 0x0.0000000000003p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000003p-1022 + 0x0p+0 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x80000000, 0x00000003, STR(__LINE__)); // 0x0p+0 + -0x0.0000000000003p-1022 = -0x0.0000000000003p-1022
+}
+
+void f14(void) {
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000003p-1022 + -0x0p+0 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0p+0 + -0x0.0000000000003p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x80100000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0p+0 + -0x1p-1022 = -0x1p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1p-1022 + -0x0p+0 = -0x1p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1p-1022 + 0x0p+0 = 0x1p-1022
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x00100000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0p+0 + 0x1p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x80100000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // 0x0p+0 + -0x1p-1022 = -0x1p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1p-1022 + 0x0p+0 = -0x1p-1022
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x0p+0 + 0x1p+1023 = 0x1p+1023
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + 0x0p+0 = 0x1p+1023
+}
+
+void f15(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x0p+0 + 0x1p+1023 = 0x1p+1023
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -0x0p+0 = 0x1p+1023
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + 0x0p+0 = -0x1p+1023
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x0p+0 + -0x1p+1023 = -0x1p+1023
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -0x0p+0 = -0x1p+1023
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x0p+0 + -0x1p+1023 = -0x1p+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x0p+0 = 0x1p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x0p+0 + 0x1p+0 = 0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x0p+0 = -0x1p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x0p+0 + -0x1p+0 = -0x1p+0
+}
+
+void f16(void) {
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x0p+0 + 0x1p+0 = 0x1p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x0p+0 = 0x1p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x40140000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 + -0x0p+0 = 0x1.4p+2
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x40140000, 0x00000000, STR(__LINE__)); // -0x0p+0 + 0x1.4p+2 = 0x1.4p+2
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x40140000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 + 0x0p+0 = 0x1.4p+2
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x40140000, 0x00000000, STR(__LINE__)); // 0x0p+0 + 0x1.4p+2 = 0x1.4p+2
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + 0x0p+0 = inf
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0p+0 + inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + -0x0p+0 = inf
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0p+0 + inf = inf
+}
+
+void f17(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + 0x0p+0 = -inf
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0p+0 + -inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + -0x0p+0 = -inf
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0p+0 + -inf = -inf
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x00000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + 0x0p+0 = nan
+ comp64(double_of_bits(0x00000000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x80000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + -0x0p+0 = nan
+ comp64(double_of_bits(0x80000000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 + nan = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + -inf = -inf
+}
+
+void f18(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -inf + inf = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // inf + -inf = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x000fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + 0x0.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x000fffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + 0x0.fffffffffffffp-1022 = -inf
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + -inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x800fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + -0x0.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x800fffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + -0x0.fffffffffffffp-1022 = -inf
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + -inf = -inf
+}
+
+void f19(void) {
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 + inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + 0x0.0000000000003p-1022 = inf
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 + -inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + 0x0.0000000000003p-1022 = -inf
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 + inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + -0x0.0000000000003p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 + -inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + -0x0.0000000000003p-1022 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + 0x1p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + inf = inf
+}
+
+void f20(void) {
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf + -0x1p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf + -0x1p+1023 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -inf = -inf
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x7ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + inf = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // inf + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0xfff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + -inf = nan
+ comp64(double_of_bits(0xfff00000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -inf + nan = nan
+}
+
+void f21(void) {
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + nan = nan
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x000fffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + 0x0.fffffffffffffp-1022 = nan
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x800fffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + -0x0.fffffffffffffp-1022 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x00000000, 0x00000001), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + 0x0.0000000000001p-1022 = nan
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x80000000, 0x00000001), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + -0x0.0000000000001p-1022 = nan
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + 0x1p+0 = nan
+}
+
+void f22(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x1p+0 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + -0x1p+0 = nan
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x1p+0 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + 0x1.fffffffffffffp+1023 = nan
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) + double_of_bits(0xffefffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan + -0x1.fffffffffffffp+1023 = nan
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + nan = nan
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + 0x1p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) + double_of_bits(0x7fefffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1023 + 0x1.ffffffffffffep+1023 = inf
+}
+
+void f23(void) {
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) + double_of_bits(0xffefffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1023 + -0x1.ffffffffffffep+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1023 + 0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7fefffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1.ffffffffffffep+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) + double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1023 + -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xffefffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1.ffffffffffffep+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000001) + double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1023 + 0x1p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + 0x1.0000000000001p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000001) + double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 + -0x1p+1023 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -0x1.0000000000001p+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7cb00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1p+972 = inf
+}
+
+void f24(void) {
+ comp64(double_of_bits(0x7cb00000, 0x00000000) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+972 + 0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xfcb00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1p+972 = -inf
+ comp64(double_of_bits(0xfcb00000, 0x00000000) + double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+972 + -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x3ff00000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1p+0 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x7fefffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 + 0x1.fffffffffffffp+1023 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xbff00000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1p+0 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xffefffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 + -0x1.fffffffffffffp+1023 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x7fefffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.fffffffffffffp+1023 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x0.0000000000001p-1022 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xffefffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.fffffffffffffp+1023 = -0x1.fffffffffffffp+1023
+}
+
+void f25(void) {
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x0.0000000000001p-1022 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7c800000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1p+969 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7c800000, 0x00000000) + double_of_bits(0x7fefffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1p+969 + 0x1.fffffffffffffp+1023 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xfc800000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1p+969 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xfc800000, 0x00000000) + double_of_bits(0xffefffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1p+969 + -0x1.fffffffffffffp+1023 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7c800000, 0x00000001), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1.0000000000001p+969 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7c800000, 0x00000001) + double_of_bits(0x7fefffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.0000000000001p+969 + 0x1.fffffffffffffp+1023 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xfc800000, 0x00000001), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1.0000000000001p+969 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xfc800000, 0x00000001) + double_of_bits(0xffefffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.0000000000001p+969 + -0x1.fffffffffffffp+1023 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) + double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 + 0x1p+1023 = inf
+}
+
+void f26(void) {
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x7fdfffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + 0x1.fffffffffffffp+1022 = inf
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) + double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 + -0x1p+1023 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0xffdfffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -0x1.fffffffffffffp+1022 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7c980000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1.8p+970 = inf
+ comp64(double_of_bits(0x7c980000, 0x00000000) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.8p+970 + 0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xfc980000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1.8p+970 = -inf
+ comp64(double_of_bits(0xfc980000, 0x00000000) + double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+970 + -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7c900000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1.0000000000001p+970 = inf
+ comp64(double_of_bits(0x7c900000, 0x00000001) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+970 + 0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xfc900000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1.0000000000001p+970 = -inf
+}
+
+void f27(void) {
+ comp64(double_of_bits(0xfc900000, 0x00000001) + double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+970 + -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x7c980000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + 0x1.8000000000001p+970 = inf
+ comp64(double_of_bits(0x7c980000, 0x00000001) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.8000000000001p+970 + 0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0xfc980000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + -0x1.8000000000001p+970 = -inf
+ comp64(double_of_bits(0xfc980000, 0x00000001) + double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.8000000000001p+970 + -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + 0x1p+0 = 0x1p+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1p+1023 = 0x1p+1023
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -0x1p+0 = -0x1p+1023
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1p+1023 = -0x1p+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) + double_of_bits(0x3ff00000, 0x00000000), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1022 + 0x1p+0 = 0x1.fffffffffffffp+1022
+}
+
+void f28(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x7fdfffff, 0xffffffff), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 + 0x1.fffffffffffffp+1022 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) + double_of_bits(0x3ff00000, 0x00000000), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep+1023 + 0x1p+0 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x7fefffff, 0xfffffffe), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1p+0 + 0x1.ffffffffffffep+1023 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) + double_of_bits(0xbff00000, 0x00000000), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep+1023 + -0x1p+0 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xffefffff, 0xfffffffe), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1p+0 + -0x1.ffffffffffffep+1023 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1p+1023 = 0x1p+1023
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x00000000, 0x00000001), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + 0x0.0000000000001p-1022 = 0x1p+1023
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1p+1023 = -0x1p+1023
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x80000000, 0x00000001), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + -0x0.0000000000001p-1022 = -0x1p+1023
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x7fdfffff, 0xffffffff), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.fffffffffffffp+1022 = 0x1.fffffffffffffp+1022
+}
+
+void f29(void) {
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1022 + 0x0.0000000000001p-1022 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xffdfffff, 0xffffffff), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.fffffffffffffp+1022 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1022 + -0x0.0000000000001p-1022 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x7fefffff, 0xfffffffe), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.ffffffffffffep+1023 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) + double_of_bits(0x00000000, 0x00000001), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep+1023 + 0x0.0000000000001p-1022 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xffefffff, 0xfffffffe), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.ffffffffffffep+1023 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) + double_of_bits(0x80000000, 0x00000001), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep+1023 + -0x0.0000000000001p-1022 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x7ca00000, 0x00000000), 0x7fe00000, 0x00000001, STR(__LINE__)); // 0x1p+1023 + 0x1p+971 = 0x1.0000000000001p+1023
+ comp64(double_of_bits(0x7ca00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000001, STR(__LINE__)); // 0x1p+971 + 0x1p+1023 = 0x1.0000000000001p+1023
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0xfca00000, 0x00000000), 0xffe00000, 0x00000001, STR(__LINE__)); // -0x1p+1023 + -0x1p+971 = -0x1.0000000000001p+1023
+}
+
+void f30(void) {
+ comp64(double_of_bits(0xfca00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000001, STR(__LINE__)); // -0x1p+971 + -0x1p+1023 = -0x1.0000000000001p+1023
+ comp64(double_of_bits(0x7fdfffff, 0xfffffffe) + double_of_bits(0x7fdfffff, 0xfffffffe), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep+1022 + 0x1.ffffffffffffep+1022 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffdfffff, 0xfffffffe) + double_of_bits(0xffdfffff, 0xfffffffe), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep+1022 + -0x1.ffffffffffffep+1022 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0x40080000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 + 0x1.8p+1 = 0x1.8p+2
+ comp64(double_of_bits(0x00100000, 0x00000000) + double_of_bits(0x00100000, 0x00000000), 0x00200000, 0x00000000, STR(__LINE__)); // 0x1p-1022 + 0x1p-1022 = 0x1p-1021
+ comp64(double_of_bits(0x7fd00000, 0x00000000) + double_of_bits(0x7fd00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1022 + 0x1p+1022 = 0x1p+1023
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x000fffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + 0x0.fffffffffffffp-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x800fffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + -0x0.fffffffffffffp-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x00000000, 0x00000004) + double_of_bits(0x00000000, 0x00000004), 0x00000000, 0x00000008, STR(__LINE__)); // 0x0.0000000000004p-1022 + 0x0.0000000000004p-1022 = 0x0.0000000000008p-1022
+ comp64(double_of_bits(0x80000000, 0x00000004) + double_of_bits(0x80000000, 0x00000004), 0x80000000, 0x00000008, STR(__LINE__)); // -0x0.0000000000004p-1022 + -0x0.0000000000004p-1022 = -0x0.0000000000008p-1022
+}
+
+void f31(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40200000, 0x00000000), 0x40220000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+3 = 0x1.2p+3
+ comp64(double_of_bits(0x40200000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40220000, 0x00000000, STR(__LINE__)); // 0x1p+3 + 0x1.0000000000001p+0 = 0x1.2p+3
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0xc0200000, 0x00000000), 0xc0220000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + -0x1p+3 = -0x1.2p+3
+ comp64(double_of_bits(0xc0200000, 0x00000000) + double_of_bits(0xbff00000, 0x00000001), 0xc0220000, 0x00000000, STR(__LINE__)); // -0x1p+3 + -0x1.0000000000001p+0 = -0x1.2p+3
+ comp64(double_of_bits(0x43600000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x43600000, 0x00000000, STR(__LINE__)); // 0x1p+55 + 0x1p+0 = 0x1p+55
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x43600000, 0x00000000), 0x43600000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1p+55 = 0x1p+55
+ comp64(double_of_bits(0xc3600000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc3600000, 0x00000000, STR(__LINE__)); // -0x1p+55 + -0x1p+0 = -0x1p+55
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc3600000, 0x00000000), 0xc3600000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1p+55 = -0x1p+55
+}
+
+void f32(void) {
+ comp64(double_of_bits(0x00400000, 0x00000000) + double_of_bits(0x00000000, 0x00000001), 0x00400000, 0x00000000, STR(__LINE__)); // 0x1p-1019 + 0x0.0000000000001p-1022 = 0x1p-1019
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x00400000, 0x00000000), 0x00400000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1p-1019 = 0x1p-1019
+ comp64(double_of_bits(0x80400000, 0x00000000) + double_of_bits(0x80000000, 0x00000001), 0x80400000, 0x00000000, STR(__LINE__)); // -0x1p-1019 + -0x0.0000000000001p-1022 = -0x1p-1019
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x80400000, 0x00000000), 0x80400000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1p-1019 = -0x1p-1019
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x3ff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1p+0 = 0x1p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x00000000, 0x00000001), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x0.0000000000001p-1022 = 0x1p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xbff00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1p+0 = -0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x80000000, 0x00000001), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x0.0000000000001p-1022 = -0x1p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x3fefffff, 0xffffffff), 0x3fefffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.fffffffffffffp-1 = 0x1.fffffffffffffp-1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0x3fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp-1 + 0x0.0000000000001p-1022 = 0x1.fffffffffffffp-1
+}
+
+void f33(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xbfefffff, 0xffffffff), 0xbfefffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.fffffffffffffp-1 = -0x1.fffffffffffffp-1
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0xbfefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp-1 + -0x0.0000000000001p-1022 = -0x1.fffffffffffffp-1
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x3fffffff, 0xffffffff), 0x3fffffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.fffffffffffffp+0 = 0x1.fffffffffffffp+0
+ comp64(double_of_bits(0x3fffffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0x3fffffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+0 + 0x0.0000000000001p-1022 = 0x1.fffffffffffffp+0
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xbfffffff, 0xffffffff), 0xbfffffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.fffffffffffffp+0 = -0x1.fffffffffffffp+0
+ comp64(double_of_bits(0xbfffffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0xbfffffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+0 + -0x0.0000000000001p-1022 = -0x1.fffffffffffffp+0
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x3fffffff, 0xfffffffe), 0x3fffffff, 0xfffffffe, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.ffffffffffffep+0 = 0x1.ffffffffffffep+0
+ comp64(double_of_bits(0x3fffffff, 0xfffffffe) + double_of_bits(0x00000000, 0x00000001), 0x3fffffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep+0 + 0x0.0000000000001p-1022 = 0x1.ffffffffffffep+0
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0xbfffffff, 0xfffffffe), 0xbfffffff, 0xfffffffe, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.ffffffffffffep+0 = -0x1.ffffffffffffep+0
+ comp64(double_of_bits(0xbfffffff, 0xfffffffe) + double_of_bits(0x80000000, 0x00000001), 0xbfffffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep+0 + -0x0.0000000000001p-1022 = -0x1.ffffffffffffep+0
+}
+
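A note on f32 and f33 above: the smallest subnormal, 0x0.0000000000001p-1022 = 2^-1074, is far below half an ulp of every other operand used there (for 0x1p+0 an ulp is 2^-52, for 0x1p-1019 it is 2^-1071), so under round-to-nearest the sum rounds back to the larger operand unchanged and the expected encodings simply repeat that operand's bits.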
+void f34(void) {
+ comp64(double_of_bits(0x436fffff, 0xffffffff) + double_of_bits(0x40260000, 0x00000000), 0x43700000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+55 + 0x1.6p+3 = 0x1p+56
+ comp64(double_of_bits(0x40260000, 0x00000000) + double_of_bits(0x436fffff, 0xffffffff), 0x43700000, 0x00000000, STR(__LINE__)); // 0x1.6p+3 + 0x1.fffffffffffffp+55 = 0x1p+56
+ comp64(double_of_bits(0xc36fffff, 0xffffffff) + double_of_bits(0xc0260000, 0x00000000), 0xc3700000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+55 + -0x1.6p+3 = -0x1p+56
+ comp64(double_of_bits(0xc0260000, 0x00000000) + double_of_bits(0xc36fffff, 0xffffffff), 0xc3700000, 0x00000000, STR(__LINE__)); // -0x1.6p+3 + -0x1.fffffffffffffp+55 = -0x1p+56
+ comp64(double_of_bits(0x436c0000, 0x00000000) + double_of_bits(0xc01c0000, 0x00000000), 0x436bffff, 0xffffffff, STR(__LINE__)); // 0x1.cp+55 + -0x1.cp+2 = 0x1.bffffffffffffp+55
+ comp64(double_of_bits(0xc01c0000, 0x00000000) + double_of_bits(0x436c0000, 0x00000000), 0x436bffff, 0xffffffff, STR(__LINE__)); // -0x1.cp+2 + 0x1.cp+55 = 0x1.bffffffffffffp+55
+ comp64(double_of_bits(0x401c0000, 0x00000000) + double_of_bits(0xc36c0000, 0x00000000), 0xc36bffff, 0xffffffff, STR(__LINE__)); // 0x1.cp+2 + -0x1.cp+55 = -0x1.bffffffffffffp+55
+ comp64(double_of_bits(0xc36c0000, 0x00000000) + double_of_bits(0x401c0000, 0x00000000), 0xc36bffff, 0xffffffff, STR(__LINE__)); // -0x1.cp+55 + 0x1.cp+2 = -0x1.bffffffffffffp+55
+ comp64(double_of_bits(0x00500000, 0x00000000) + double_of_bits(0x80000000, 0x00000007), 0x004fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1018 + -0x0.0000000000007p-1022 = 0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x80000000, 0x00000007) + double_of_bits(0x00500000, 0x00000000), 0x004fffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000007p-1022 + 0x1p-1018 = 0x1.fffffffffffffp-1019
+}
+
+void f35(void) {
+ comp64(double_of_bits(0x80500000, 0x00000000) + double_of_bits(0x00000000, 0x00000007), 0x804fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1018 + 0x0.0000000000007p-1022 = -0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x00000000, 0x00000007) + double_of_bits(0x80500000, 0x00000000), 0x804fffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000007p-1022 + -0x1p-1018 = -0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40100000, 0x00000000), 0x40140000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+2 = 0x1.4p+2
+ comp64(double_of_bits(0x40100000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40140000, 0x00000000, STR(__LINE__)); // 0x1p+2 + 0x1.0000000000001p+0 = 0x1.4p+2
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0xc0100000, 0x00000000), 0xc0140000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + -0x1p+2 = -0x1.4p+2
+ comp64(double_of_bits(0xc0100000, 0x00000000) + double_of_bits(0xbff00000, 0x00000001), 0xc0140000, 0x00000000, STR(__LINE__)); // -0x1p+2 + -0x1.0000000000001p+0 = -0x1.4p+2
+ comp64(double_of_bits(0x43500000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x43500000, 0x00000000, STR(__LINE__)); // 0x1p+54 + 0x1p+0 = 0x1p+54
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x43500000, 0x00000000), 0x43500000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1p+54 = 0x1p+54
+ comp64(double_of_bits(0xc3500000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc3500000, 0x00000000, STR(__LINE__)); // -0x1p+54 + -0x1p+0 = -0x1p+54
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc3500000, 0x00000000), 0xc3500000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1p+54 = -0x1p+54
+}
+
+void f36(void) {
+ comp64(double_of_bits(0x40240000, 0x00000000) + double_of_bits(0x436fffff, 0xffffffff), 0x43700000, 0x00000000, STR(__LINE__)); // 0x1.4p+3 + 0x1.fffffffffffffp+55 = 0x1p+56
+ comp64(double_of_bits(0x436fffff, 0xffffffff) + double_of_bits(0x40240000, 0x00000000), 0x43700000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+55 + 0x1.4p+3 = 0x1p+56
+ comp64(double_of_bits(0xc0240000, 0x00000000) + double_of_bits(0xc36fffff, 0xffffffff), 0xc3700000, 0x00000000, STR(__LINE__)); // -0x1.4p+3 + -0x1.fffffffffffffp+55 = -0x1p+56
+ comp64(double_of_bits(0xc36fffff, 0xffffffff) + double_of_bits(0xc0240000, 0x00000000), 0xc3700000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+55 + -0x1.4p+3 = -0x1p+56
+ comp64(double_of_bits(0x40100000, 0x00000000) + double_of_bits(0xbcb80000, 0x00000000), 0x400fffff, 0xffffffff, STR(__LINE__)); // 0x1p+2 + -0x1.8p-52 = 0x1.fffffffffffffp+1
+ comp64(double_of_bits(0xbcb80000, 0x00000000) + double_of_bits(0x40100000, 0x00000000), 0x400fffff, 0xffffffff, STR(__LINE__)); // -0x1.8p-52 + 0x1p+2 = 0x1.fffffffffffffp+1
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0xc3600000, 0x00000000), 0xc35fffff, 0xffffffff, STR(__LINE__)); // 0x1.8p+1 + -0x1p+55 = -0x1.fffffffffffffp+54
+ comp64(double_of_bits(0xc3600000, 0x00000000) + double_of_bits(0x40080000, 0x00000000), 0xc35fffff, 0xffffffff, STR(__LINE__)); // -0x1p+55 + 0x1.8p+1 = -0x1.fffffffffffffp+54
+ comp64(double_of_bits(0xc0100000, 0x00000000) + double_of_bits(0x3cb80000, 0x00000000), 0xc00fffff, 0xffffffff, STR(__LINE__)); // -0x1p+2 + 0x1.8p-52 = -0x1.fffffffffffffp+1
+ comp64(double_of_bits(0x3cb80000, 0x00000000) + double_of_bits(0xc0100000, 0x00000000), 0xc00fffff, 0xffffffff, STR(__LINE__)); // 0x1.8p-52 + -0x1p+2 = -0x1.fffffffffffffp+1
+}
+
+void f37(void) {
+ comp64(double_of_bits(0xc0080000, 0x00000000) + double_of_bits(0x43600000, 0x00000000), 0x435fffff, 0xffffffff, STR(__LINE__)); // -0x1.8p+1 + 0x1p+55 = 0x1.fffffffffffffp+54
+ comp64(double_of_bits(0x43600000, 0x00000000) + double_of_bits(0xc0080000, 0x00000000), 0x435fffff, 0xffffffff, STR(__LINE__)); // 0x1p+55 + -0x1.8p+1 = 0x1.fffffffffffffp+54
+ comp64(double_of_bits(0x00400000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x003fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1019 + -0x0.0000000000003p-1022 = 0x1.fffffffffffffp-1020
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x00400000, 0x00000000), 0x003fffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000003p-1022 + 0x1p-1019 = 0x1.fffffffffffffp-1020
+ comp64(double_of_bits(0x80400000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x803fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1019 + 0x0.0000000000003p-1022 = -0x1.fffffffffffffp-1020
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x80400000, 0x00000000), 0x803fffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000003p-1022 + -0x1p-1019 = -0x1.fffffffffffffp-1020
+ comp64(double_of_bits(0x3ff00000, 0x00000003) + double_of_bits(0x40200000, 0x00000000), 0x40220000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p+0 + 0x1p+3 = 0x1.2p+3
+ comp64(double_of_bits(0x40200000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000003), 0x40220000, 0x00000000, STR(__LINE__)); // 0x1p+3 + 0x1.0000000000003p+0 = 0x1.2p+3
+ comp64(double_of_bits(0xbff00000, 0x00000003) + double_of_bits(0xc0200000, 0x00000000), 0xc0220000, 0x00000000, STR(__LINE__)); // -0x1.0000000000003p+0 + -0x1p+3 = -0x1.2p+3
+ comp64(double_of_bits(0xc0200000, 0x00000000) + double_of_bits(0xbff00000, 0x00000003), 0xc0220000, 0x00000000, STR(__LINE__)); // -0x1p+3 + -0x1.0000000000003p+0 = -0x1.2p+3
+}
+
+void f38(void) {
+ comp64(double_of_bits(0x400fffff, 0xffffffff) + double_of_bits(0x3cafffff, 0xffffffff), 0x400fffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1 + 0x1.fffffffffffffp-53 = 0x1.fffffffffffffp+1
+ comp64(double_of_bits(0x3cafffff, 0xffffffff) + double_of_bits(0x400fffff, 0xffffffff), 0x400fffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp-53 + 0x1.fffffffffffffp+1 = 0x1.fffffffffffffp+1
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) + double_of_bits(0xbcafffff, 0xffffffff), 0xc00fffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1 + -0x1.fffffffffffffp-53 = -0x1.fffffffffffffp+1
+ comp64(double_of_bits(0xbcafffff, 0xffffffff) + double_of_bits(0xc00fffff, 0xffffffff), 0xc00fffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp-53 + -0x1.fffffffffffffp+1 = -0x1.fffffffffffffp+1
+ comp64(double_of_bits(0x00400000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x00400000, 0x00000000, STR(__LINE__)); // 0x1p-1019 + 0x0.0000000000003p-1022 = 0x1p-1019
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x00400000, 0x00000000), 0x00400000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 + 0x1p-1019 = 0x1p-1019
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x80400000, 0x00000000), 0x80400000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 + -0x1p-1019 = -0x1p-1019
+ comp64(double_of_bits(0x80400000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x80400000, 0x00000000, STR(__LINE__)); // -0x1p-1019 + -0x0.0000000000003p-1022 = -0x1p-1019
+ comp64(double_of_bits(0x402c0000, 0x00000000) + double_of_bits(0x436fffff, 0xffffffff), 0x43700000, 0x00000000, STR(__LINE__)); // 0x1.cp+3 + 0x1.fffffffffffffp+55 = 0x1p+56
+ comp64(double_of_bits(0x436fffff, 0xffffffff) + double_of_bits(0x402c0000, 0x00000000), 0x43700000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+55 + 0x1.cp+3 = 0x1p+56
+}
+
+void f39(void) {
+ comp64(double_of_bits(0xc02c0000, 0x00000000) + double_of_bits(0xc36fffff, 0xffffffff), 0xc3700000, 0x00000000, STR(__LINE__)); // -0x1.cp+3 + -0x1.fffffffffffffp+55 = -0x1p+56
+ comp64(double_of_bits(0xc36fffff, 0xffffffff) + double_of_bits(0xc02c0000, 0x00000000), 0xc3700000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+55 + -0x1.cp+3 = -0x1p+56
+ comp64(double_of_bits(0x40200000, 0x00000000) + double_of_bits(0xbcc40000, 0x00000000), 0x401fffff, 0xffffffff, STR(__LINE__)); // 0x1p+3 + -0x1.4p-51 = 0x1.fffffffffffffp+2
+ comp64(double_of_bits(0xbcc40000, 0x00000000) + double_of_bits(0x40200000, 0x00000000), 0x401fffff, 0xffffffff, STR(__LINE__)); // -0x1.4p-51 + 0x1p+3 = 0x1.fffffffffffffp+2
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0xc3700000, 0x00000000), 0xc36fffff, 0xffffffff, STR(__LINE__)); // 0x1.4p+2 + -0x1p+56 = -0x1.fffffffffffffp+55
+ comp64(double_of_bits(0xc3700000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0xc36fffff, 0xffffffff, STR(__LINE__)); // -0x1p+56 + 0x1.4p+2 = -0x1.fffffffffffffp+55
+ comp64(double_of_bits(0xc0200000, 0x00000000) + double_of_bits(0x3cc40000, 0x00000000), 0xc01fffff, 0xffffffff, STR(__LINE__)); // -0x1p+3 + 0x1.4p-51 = -0x1.fffffffffffffp+2
+ comp64(double_of_bits(0x3cc40000, 0x00000000) + double_of_bits(0xc0200000, 0x00000000), 0xc01fffff, 0xffffffff, STR(__LINE__)); // 0x1.4p-51 + -0x1p+3 = -0x1.fffffffffffffp+2
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0x43700000, 0x00000000), 0x436fffff, 0xffffffff, STR(__LINE__)); // -0x1.4p+2 + 0x1p+56 = 0x1.fffffffffffffp+55
+ comp64(double_of_bits(0x43700000, 0x00000000) + double_of_bits(0xc0140000, 0x00000000), 0x436fffff, 0xffffffff, STR(__LINE__)); // 0x1p+56 + -0x1.4p+2 = 0x1.fffffffffffffp+55
+}
+
+void f40(void) {
+ comp64(double_of_bits(0x00500000, 0x00000000) + double_of_bits(0x80000000, 0x00000005), 0x004fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1018 + -0x0.0000000000005p-1022 = 0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x80000000, 0x00000005) + double_of_bits(0x00500000, 0x00000000), 0x004fffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000005p-1022 + 0x1p-1018 = 0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x80500000, 0x00000000) + double_of_bits(0x00000000, 0x00000005), 0x804fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1018 + 0x0.0000000000005p-1022 = -0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x00000000, 0x00000005) + double_of_bits(0x80500000, 0x00000000), 0x804fffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000005p-1022 + -0x1p-1018 = -0x1.fffffffffffffp-1019
+ comp64(double_of_bits(0x3cb00000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p-52 + 0x1p+1 = 0x1p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x3cb00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1p-52 = 0x1p+1
+ comp64(double_of_bits(0xbcb00000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p-52 + -0x1p+1 = -0x1p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xbcb00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+1 + -0x1p-52 = -0x1p+1
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x00200000, 0x00000000), 0x00200000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1p-1021 = 0x1p-1021
+ comp64(double_of_bits(0x00200000, 0x00000000) + double_of_bits(0x00000000, 0x00000001), 0x00200000, 0x00000000, STR(__LINE__)); // 0x1p-1021 + 0x0.0000000000001p-1022 = 0x1p-1021
+}
+
+void f41(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x80200000, 0x00000000), 0x80200000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1p-1021 = -0x1p-1021
+ comp64(double_of_bits(0x80200000, 0x00000000) + double_of_bits(0x80000000, 0x00000001), 0x80200000, 0x00000000, STR(__LINE__)); // -0x1p-1021 + -0x0.0000000000001p-1022 = -0x1p-1021
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x3ff00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+0 = 0x1p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1.0000000000001p+0 = 0x1p+1
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0xbff00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + -0x1p+0 = -0x1p+1
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000001), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1.0000000000001p+0 = -0x1p+1
+ comp64(double_of_bits(0x7fd00000, 0x00000001) + double_of_bits(0x7fd00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1022 + 0x1p+1022 = 0x1p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000000) + double_of_bits(0x7fd00000, 0x00000001), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1022 + 0x1.0000000000001p+1022 = 0x1p+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x40000000, 0x00000001), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1.0000000000001p+1 = 0x1p+2
+ comp64(double_of_bits(0x40000000, 0x00000001) + double_of_bits(0x40000000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1 + 0x1p+1 = 0x1p+2
+}
+
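The pairs in f41 above that differ by one unit in the last place are round-to-nearest-even ties. For 0x1.0000000000001p+0 + 0x1p+0 the exact sum is 2 + 2^-52, exactly halfway between the representable neighbours 0x1p+1 and 0x1.0000000000001p+1 (one ulp, 2^-51, apart in that binade); the tie goes to the candidate whose last mantissa bit is even, hence 0x1p+1. The ties in f42 and f44 below resolve the same way, which is why, for instance, 0x1p+0 + 0x1.0000000000003p+0 rounds up to the even 0x1.0000000000002p+1 rather than down to the odd 0x1.0000000000001p+1.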
+void f42(void) {
+ comp64(double_of_bits(0x7fdfffff, 0xfffffffe) + double_of_bits(0x7fdfffff, 0xffffffff), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep+1022 + 0x1.fffffffffffffp+1022 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) + double_of_bits(0x7fdfffff, 0xfffffffe), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp+1022 + 0x1.ffffffffffffep+1022 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffd00000, 0x00000001) + double_of_bits(0xffd00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1022 + -0x1p+1022 = -0x1p+1023
+ comp64(double_of_bits(0xffd00000, 0x00000000) + double_of_bits(0xffd00000, 0x00000001), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1022 + -0x1.0000000000001p+1022 = -0x1p+1023
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xc0000000, 0x00000001), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1p+1 + -0x1.0000000000001p+1 = -0x1p+2
+ comp64(double_of_bits(0xc0000000, 0x00000001) + double_of_bits(0xc0000000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1 + -0x1p+1 = -0x1p+2
+ comp64(double_of_bits(0xffdfffff, 0xfffffffe) + double_of_bits(0xffdfffff, 0xffffffff), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep+1022 + -0x1.fffffffffffffp+1022 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) + double_of_bits(0xffdfffff, 0xfffffffe), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp+1022 + -0x1.ffffffffffffep+1022 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0xbca00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + -0x1p-53 = 0x1p+0
+ comp64(double_of_bits(0xbca00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p-53 + 0x1.0000000000001p+0 = 0x1p+0
+}
+
+void f43(void) {
+ comp64(double_of_bits(0x3ca00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000001), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p-53 + -0x1.0000000000001p+0 = -0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0x3ca00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + 0x1p-53 = -0x1p+0
+ comp64(double_of_bits(0x00200000, 0x00000001) + double_of_bits(0x80000000, 0x00000001), 0x00200000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1021 + -0x0.0000000000001p-1022 = 0x1p-1021
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x00200000, 0x00000001), 0x00200000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x1.0000000000001p-1021 = 0x1p-1021
+ comp64(double_of_bits(0x80200000, 0x00000001) + double_of_bits(0x00000000, 0x00000001), 0x80200000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1021 + 0x0.0000000000001p-1022 = -0x1p-1021
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x80200000, 0x00000001), 0x80200000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x1.0000000000001p-1021 = -0x1p-1021
+ comp64(double_of_bits(0x3cb00000, 0x00000000) + double_of_bits(0x400fffff, 0xffffffff), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1p-52 + 0x1.fffffffffffffp+1 = 0x1p+2
+ comp64(double_of_bits(0x400fffff, 0xffffffff) + double_of_bits(0x3cb00000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1 + 0x1p-52 = 0x1p+2
+ comp64(double_of_bits(0xbcb00000, 0x00000000) + double_of_bits(0xc00fffff, 0xffffffff), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1p-52 + -0x1.fffffffffffffp+1 = -0x1p+2
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) + double_of_bits(0xbcb00000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1 + -0x1p-52 = -0x1p+2
+}
+
+void f44(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x00200000, 0x00000001), 0x00200000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x1.0000000000001p-1021 = 0x1.0000000000002p-1021
+ comp64(double_of_bits(0x00200000, 0x00000001) + double_of_bits(0x00000000, 0x00000001), 0x00200000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p-1021 + 0x0.0000000000001p-1022 = 0x1.0000000000002p-1021
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x80200000, 0x00000001), 0x80200000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x1.0000000000001p-1021 = -0x1.0000000000002p-1021
+ comp64(double_of_bits(0x80200000, 0x00000001) + double_of_bits(0x80000000, 0x00000001), 0x80200000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p-1021 + -0x0.0000000000001p-1022 = -0x1.0000000000002p-1021
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000003), 0x40000000, 0x00000002, STR(__LINE__)); // 0x1p+0 + 0x1.0000000000003p+0 = 0x1.0000000000002p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000003) + double_of_bits(0x3ff00000, 0x00000000), 0x40000000, 0x00000002, STR(__LINE__)); // 0x1.0000000000003p+0 + 0x1p+0 = 0x1.0000000000002p+1
+ comp64(double_of_bits(0x40000000, 0x00000001) + double_of_bits(0x40000000, 0x00000002), 0x40100000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+1 + 0x1.0000000000002p+1 = 0x1.0000000000002p+2
+ comp64(double_of_bits(0x40000000, 0x00000002) + double_of_bits(0x40000000, 0x00000001), 0x40100000, 0x00000002, STR(__LINE__)); // 0x1.0000000000002p+1 + 0x1.0000000000001p+1 = 0x1.0000000000002p+2
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000003), 0xc0000000, 0x00000002, STR(__LINE__)); // -0x1p+0 + -0x1.0000000000003p+0 = -0x1.0000000000002p+1
+ comp64(double_of_bits(0xbff00000, 0x00000003) + double_of_bits(0xbff00000, 0x00000000), 0xc0000000, 0x00000002, STR(__LINE__)); // -0x1.0000000000003p+0 + -0x1p+0 = -0x1.0000000000002p+1
+}
+
+void f45(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000001) + double_of_bits(0xc0000000, 0x00000002), 0xc0100000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+1 + -0x1.0000000000002p+1 = -0x1.0000000000002p+2
+ comp64(double_of_bits(0xc0000000, 0x00000002) + double_of_bits(0xc0000000, 0x00000001), 0xc0100000, 0x00000002, STR(__LINE__)); // -0x1.0000000000002p+1 + -0x1.0000000000001p+1 = -0x1.0000000000002p+2
+ comp64(double_of_bits(0x3cb00000, 0x00000000) + double_of_bits(0x40000000, 0x00000001), 0x40000000, 0x00000002, STR(__LINE__)); // 0x1p-52 + 0x1.0000000000001p+1 = 0x1.0000000000002p+1
+ comp64(double_of_bits(0x40000000, 0x00000001) + double_of_bits(0x3cb00000, 0x00000000), 0x40000000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+1 + 0x1p-52 = 0x1.0000000000002p+1
+ comp64(double_of_bits(0xbcb00000, 0x00000000) + double_of_bits(0xc0000000, 0x00000001), 0xc0000000, 0x00000002, STR(__LINE__)); // -0x1p-52 + -0x1.0000000000001p+1 = -0x1.0000000000002p+1
+ comp64(double_of_bits(0xc0000000, 0x00000001) + double_of_bits(0xbcb00000, 0x00000000), 0xc0000000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+1 + -0x1p-52 = -0x1.0000000000002p+1
+ comp64(double_of_bits(0x40000000, 0x00000002) + double_of_bits(0xbcb00000, 0x00000000), 0x40000000, 0x00000002, STR(__LINE__)); // 0x1.0000000000002p+1 + -0x1p-52 = 0x1.0000000000002p+1
+ comp64(double_of_bits(0xbcb00000, 0x00000000) + double_of_bits(0x40000000, 0x00000002), 0x40000000, 0x00000002, STR(__LINE__)); // -0x1p-52 + 0x1.0000000000002p+1 = 0x1.0000000000002p+1
+ comp64(double_of_bits(0x3cb00000, 0x00000000) + double_of_bits(0xc0000000, 0x00000002), 0xc0000000, 0x00000002, STR(__LINE__)); // 0x1p-52 + -0x1.0000000000002p+1 = -0x1.0000000000002p+1
+ comp64(double_of_bits(0xc0000000, 0x00000002) + double_of_bits(0x3cb00000, 0x00000000), 0xc0000000, 0x00000002, STR(__LINE__)); // -0x1.0000000000002p+1 + 0x1p-52 = -0x1.0000000000002p+1
+}
+
+void f46(void) {
+ comp64(double_of_bits(0x00300000, 0x00000002) + double_of_bits(0x80000000, 0x00000001), 0x00300000, 0x00000002, STR(__LINE__)); // 0x1.0000000000002p-1020 + -0x0.0000000000001p-1022 = 0x1.0000000000002p-1020
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x00300000, 0x00000002), 0x00300000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x1.0000000000002p-1020 = 0x1.0000000000002p-1020
+ comp64(double_of_bits(0x80300000, 0x00000002) + double_of_bits(0x00000000, 0x00000001), 0x80300000, 0x00000002, STR(__LINE__)); // -0x1.0000000000002p-1020 + 0x0.0000000000001p-1022 = -0x1.0000000000002p-1020
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x80300000, 0x00000002), 0x80300000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x1.0000000000002p-1020 = -0x1.0000000000002p-1020
+ comp64(double_of_bits(0x3ff00000, 0x00000003) + double_of_bits(0x40100000, 0x00000000), 0x40140000, 0x00000001, STR(__LINE__)); // 0x1.0000000000003p+0 + 0x1p+2 = 0x1.4000000000001p+2
+ comp64(double_of_bits(0x40100000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000003), 0x40140000, 0x00000001, STR(__LINE__)); // 0x1p+2 + 0x1.0000000000003p+0 = 0x1.4000000000001p+2
+ comp64(double_of_bits(0xbff00000, 0x00000003) + double_of_bits(0xc0100000, 0x00000000), 0xc0140000, 0x00000001, STR(__LINE__)); // -0x1.0000000000003p+0 + -0x1p+2 = -0x1.4000000000001p+2
+ comp64(double_of_bits(0xc0100000, 0x00000000) + double_of_bits(0xbff00000, 0x00000003), 0xc0140000, 0x00000001, STR(__LINE__)); // -0x1p+2 + -0x1.0000000000003p+0 = -0x1.4000000000001p+2
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0x43580000, 0x00000000), 0x43580000, 0x00000001, STR(__LINE__)); // 0x1.8p+1 + 0x1.8p+54 = 0x1.8000000000001p+54
+ comp64(double_of_bits(0x43580000, 0x00000000) + double_of_bits(0x40080000, 0x00000000), 0x43580000, 0x00000001, STR(__LINE__)); // 0x1.8p+54 + 0x1.8p+1 = 0x1.8000000000001p+54
+}
+
+void f47(void) {
+ comp64(double_of_bits(0xc0080000, 0x00000000) + double_of_bits(0xc3580000, 0x00000000), 0xc3580000, 0x00000001, STR(__LINE__)); // -0x1.8p+1 + -0x1.8p+54 = -0x1.8000000000001p+54
+ comp64(double_of_bits(0xc3580000, 0x00000000) + double_of_bits(0xc0080000, 0x00000000), 0xc3580000, 0x00000001, STR(__LINE__)); // -0x1.8p+54 + -0x1.8p+1 = -0x1.8000000000001p+54
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x00300000, 0x00000000), 0x00300000, 0x00000001, STR(__LINE__)); // 0x0.0000000000003p-1022 + 0x1p-1020 = 0x1.0000000000001p-1020
+ comp64(double_of_bits(0x00300000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x00300000, 0x00000001, STR(__LINE__)); // 0x1p-1020 + 0x0.0000000000003p-1022 = 0x1.0000000000001p-1020
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x80300000, 0x00000000), 0x80300000, 0x00000001, STR(__LINE__)); // -0x0.0000000000003p-1022 + -0x1p-1020 = -0x1.0000000000001p-1020
+ comp64(double_of_bits(0x80300000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x80300000, 0x00000001, STR(__LINE__)); // -0x1p-1020 + -0x0.0000000000003p-1022 = -0x1.0000000000001p-1020
+ comp64(double_of_bits(0x3fffffff, 0xffffffff) + double_of_bits(0x3cc40000, 0x00000000), 0x40000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp+0 + 0x1.4p-51 = 0x1.0000000000001p+1
+ comp64(double_of_bits(0x3cc40000, 0x00000000) + double_of_bits(0x3fffffff, 0xffffffff), 0x40000000, 0x00000001, STR(__LINE__)); // 0x1.4p-51 + 0x1.fffffffffffffp+0 = 0x1.0000000000001p+1
+ comp64(double_of_bits(0xbfffffff, 0xffffffff) + double_of_bits(0xbcc40000, 0x00000000), 0xc0000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp+0 + -0x1.4p-51 = -0x1.0000000000001p+1
+ comp64(double_of_bits(0xbcc40000, 0x00000000) + double_of_bits(0xbfffffff, 0xffffffff), 0xc0000000, 0x00000001, STR(__LINE__)); // -0x1.4p-51 + -0x1.fffffffffffffp+0 = -0x1.0000000000001p+1
+}
+
+void f48(void) {
+ comp64(double_of_bits(0x43600000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0x43600000, 0x00000000, STR(__LINE__)); // 0x1p+55 + -0x1p+0 = 0x1p+55
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x43600000, 0x00000000), 0x43600000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1p+55 = 0x1p+55
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xc3600000, 0x00000000), 0xc3600000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1p+55 = -0x1p+55
+ comp64(double_of_bits(0xc3600000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0xc3600000, 0x00000000, STR(__LINE__)); // -0x1p+55 + 0x1p+0 = -0x1p+55
+ comp64(double_of_bits(0x00400000, 0x00000000) + double_of_bits(0x80000000, 0x00000001), 0x00400000, 0x00000000, STR(__LINE__)); // 0x1p-1019 + -0x0.0000000000001p-1022 = 0x1p-1019
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x00400000, 0x00000000), 0x00400000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x1p-1019 = 0x1p-1019
+ comp64(double_of_bits(0x80400000, 0x00000000) + double_of_bits(0x00000000, 0x00000001), 0x80400000, 0x00000000, STR(__LINE__)); // -0x1p-1019 + 0x0.0000000000001p-1022 = -0x1p-1019
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x80400000, 0x00000000), 0x80400000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x1p-1019 = -0x1p-1019
+ comp64(double_of_bits(0x3ff00000, 0x00000005) + double_of_bits(0x40200000, 0x00000000), 0x40220000, 0x00000001, STR(__LINE__)); // 0x1.0000000000005p+0 + 0x1p+3 = 0x1.2000000000001p+3
+ comp64(double_of_bits(0x40200000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000005), 0x40220000, 0x00000001, STR(__LINE__)); // 0x1p+3 + 0x1.0000000000005p+0 = 0x1.2000000000001p+3
+}
+
+void f49(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000005) + double_of_bits(0xc0200000, 0x00000000), 0xc0220000, 0x00000001, STR(__LINE__)); // -0x1.0000000000005p+0 + -0x1p+3 = -0x1.2000000000001p+3
+ comp64(double_of_bits(0xc0200000, 0x00000000) + double_of_bits(0xbff00000, 0x00000005), 0xc0220000, 0x00000001, STR(__LINE__)); // -0x1p+3 + -0x1.0000000000005p+0 = -0x1.2000000000001p+3
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0x43600000, 0x00000000), 0x43600000, 0x00000001, STR(__LINE__)); // 0x1.4p+2 + 0x1p+55 = 0x1.0000000000001p+55
+ comp64(double_of_bits(0x43600000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x43600000, 0x00000001, STR(__LINE__)); // 0x1p+55 + 0x1.4p+2 = 0x1.0000000000001p+55
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0xc3600000, 0x00000000), 0xc3600000, 0x00000001, STR(__LINE__)); // -0x1.4p+2 + -0x1p+55 = -0x1.0000000000001p+55
+ comp64(double_of_bits(0xc3600000, 0x00000000) + double_of_bits(0xc0140000, 0x00000000), 0xc3600000, 0x00000001, STR(__LINE__)); // -0x1p+55 + -0x1.4p+2 = -0x1.0000000000001p+55
+ comp64(double_of_bits(0x00000000, 0x00000005) + double_of_bits(0x00400000, 0x00000000), 0x00400000, 0x00000001, STR(__LINE__)); // 0x0.0000000000005p-1022 + 0x1p-1019 = 0x1.0000000000001p-1019
+ comp64(double_of_bits(0x00400000, 0x00000000) + double_of_bits(0x00000000, 0x00000005), 0x00400000, 0x00000001, STR(__LINE__)); // 0x1p-1019 + 0x0.0000000000005p-1022 = 0x1.0000000000001p-1019
+ comp64(double_of_bits(0x80000000, 0x00000005) + double_of_bits(0x80400000, 0x00000000), 0x80400000, 0x00000001, STR(__LINE__)); // -0x0.0000000000005p-1022 + -0x1p-1019 = -0x1.0000000000001p-1019
+ comp64(double_of_bits(0x80400000, 0x00000000) + double_of_bits(0x80000000, 0x00000005), 0x80400000, 0x00000001, STR(__LINE__)); // -0x1p-1019 + -0x0.0000000000005p-1022 = -0x1.0000000000001p-1019
+}
+
+void f50(void) {
+ comp64(double_of_bits(0x3fffffff, 0xffffffff) + double_of_bits(0x3cc00000, 0x00000001), 0x40000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp+0 + 0x1.0000000000001p-51 = 0x1.0000000000001p+1
+ comp64(double_of_bits(0x3cc00000, 0x00000001) + double_of_bits(0x3fffffff, 0xffffffff), 0x40000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-51 + 0x1.fffffffffffffp+0 = 0x1.0000000000001p+1
+ comp64(double_of_bits(0xbfffffff, 0xffffffff) + double_of_bits(0xbcc00000, 0x00000001), 0xc0000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp+0 + -0x1.0000000000001p-51 = -0x1.0000000000001p+1
+ comp64(double_of_bits(0xbcc00000, 0x00000001) + double_of_bits(0xbfffffff, 0xffffffff), 0xc0000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-51 + -0x1.fffffffffffffp+0 = -0x1.0000000000001p+1
+ comp64(double_of_bits(0x43780000, 0x00000000) + double_of_bits(0xc0080000, 0x00000000), 0x43780000, 0x00000000, STR(__LINE__)); // 0x1.8p+56 + -0x1.8p+1 = 0x1.8p+56
+ comp64(double_of_bits(0xc0080000, 0x00000000) + double_of_bits(0x43780000, 0x00000000), 0x43780000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 + 0x1.8p+56 = 0x1.8p+56
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0xc3780000, 0x00000000), 0xc3780000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 + -0x1.8p+56 = -0x1.8p+56
+ comp64(double_of_bits(0xc3780000, 0x00000000) + double_of_bits(0x40080000, 0x00000000), 0xc3780000, 0x00000000, STR(__LINE__)); // -0x1.8p+56 + 0x1.8p+1 = -0x1.8p+56
+ comp64(double_of_bits(0x00500000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x00500000, 0x00000000, STR(__LINE__)); // 0x1p-1018 + -0x0.0000000000003p-1022 = 0x1p-1018
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x00500000, 0x00000000), 0x00500000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 + 0x1p-1018 = 0x1p-1018
+}
+
+void f51(void) {
+ comp64(double_of_bits(0x80500000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0x80500000, 0x00000000, STR(__LINE__)); // -0x1p-1018 + 0x0.0000000000003p-1022 = -0x1p-1018
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x80500000, 0x00000000), 0x80500000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 + -0x1p-1018 = -0x1p-1018
+ comp64(double_of_bits(0x3ff00000, 0x00000007) + double_of_bits(0x40200000, 0x00000000), 0x40220000, 0x00000001, STR(__LINE__)); // 0x1.0000000000007p+0 + 0x1p+3 = 0x1.2000000000001p+3
+ comp64(double_of_bits(0x40200000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000007), 0x40220000, 0x00000001, STR(__LINE__)); // 0x1p+3 + 0x1.0000000000007p+0 = 0x1.2000000000001p+3
+ comp64(double_of_bits(0xbff00000, 0x00000007) + double_of_bits(0xc0200000, 0x00000000), 0xc0220000, 0x00000001, STR(__LINE__)); // -0x1.0000000000007p+0 + -0x1p+3 = -0x1.2000000000001p+3
+ comp64(double_of_bits(0xc0200000, 0x00000000) + double_of_bits(0xbff00000, 0x00000007), 0xc0220000, 0x00000001, STR(__LINE__)); // -0x1p+3 + -0x1.0000000000007p+0 = -0x1.2000000000001p+3
+ comp64(double_of_bits(0x401c0000, 0x00000000) + double_of_bits(0x436c0000, 0x00000000), 0x436c0000, 0x00000001, STR(__LINE__)); // 0x1.cp+2 + 0x1.cp+55 = 0x1.c000000000001p+55
+ comp64(double_of_bits(0x436c0000, 0x00000000) + double_of_bits(0x401c0000, 0x00000000), 0x436c0000, 0x00000001, STR(__LINE__)); // 0x1.cp+55 + 0x1.cp+2 = 0x1.c000000000001p+55
+ comp64(double_of_bits(0xc01c0000, 0x00000000) + double_of_bits(0xc36c0000, 0x00000000), 0xc36c0000, 0x00000001, STR(__LINE__)); // -0x1.cp+2 + -0x1.cp+55 = -0x1.c000000000001p+55
+ comp64(double_of_bits(0xc36c0000, 0x00000000) + double_of_bits(0xc01c0000, 0x00000000), 0xc36c0000, 0x00000001, STR(__LINE__)); // -0x1.cp+55 + -0x1.cp+2 = -0x1.c000000000001p+55
+}
+
+void f52(void) {
+ comp64(double_of_bits(0x00000000, 0x00000007) + double_of_bits(0x00400000, 0x00000000), 0x00400000, 0x00000001, STR(__LINE__)); // 0x0.0000000000007p-1022 + 0x1p-1019 = 0x1.0000000000001p-1019
+ comp64(double_of_bits(0x00400000, 0x00000000) + double_of_bits(0x00000000, 0x00000007), 0x00400000, 0x00000001, STR(__LINE__)); // 0x1p-1019 + 0x0.0000000000007p-1022 = 0x1.0000000000001p-1019
+ comp64(double_of_bits(0x80000000, 0x00000007) + double_of_bits(0x80400000, 0x00000000), 0x80400000, 0x00000001, STR(__LINE__)); // -0x0.0000000000007p-1022 + -0x1p-1019 = -0x1.0000000000001p-1019
+ comp64(double_of_bits(0x80400000, 0x00000000) + double_of_bits(0x80000000, 0x00000007), 0x80400000, 0x00000001, STR(__LINE__)); // -0x1p-1019 + -0x0.0000000000007p-1022 = -0x1.0000000000001p-1019
+ comp64(double_of_bits(0x3fffffff, 0xffffffff) + double_of_bits(0x3cc40000, 0x00000001), 0x40000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp+0 + 0x1.4000000000001p-51 = 0x1.0000000000001p+1
+ comp64(double_of_bits(0x3cc40000, 0x00000001) + double_of_bits(0x3fffffff, 0xffffffff), 0x40000000, 0x00000001, STR(__LINE__)); // 0x1.4000000000001p-51 + 0x1.fffffffffffffp+0 = 0x1.0000000000001p+1
+ comp64(double_of_bits(0xbfffffff, 0xffffffff) + double_of_bits(0xbcc40000, 0x00000001), 0xc0000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp+0 + -0x1.4000000000001p-51 = -0x1.0000000000001p+1
+ comp64(double_of_bits(0xbcc40000, 0x00000001) + double_of_bits(0xbfffffff, 0xffffffff), 0xc0000000, 0x00000001, STR(__LINE__)); // -0x1.4000000000001p-51 + -0x1.fffffffffffffp+0 = -0x1.0000000000001p+1
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -0x1p+0 = 0x1p+1023
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1p+1023 = 0x1p+1023
+}
+
+void f53(void) {
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + 0x1p+0 = -0x1p+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1p+1023 = -0x1p+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) + double_of_bits(0xbff00000, 0x00000000), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1022 + -0x1p+0 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x7fdfffff, 0xffffffff), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 + 0x1.fffffffffffffp+1022 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) + double_of_bits(0x3ff00000, 0x00000000), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1022 + 0x1p+0 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xffdfffff, 0xffffffff), 0xffdfffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 + -0x1.fffffffffffffp+1022 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0xbff00000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + -0x1p+0 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x7fefffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 + 0x1.fffffffffffffp+1023 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0x3ff00000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + 0x1p+0 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xffefffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 + -0x1.fffffffffffffp+1023 = -0x1.fffffffffffffp+1023
+}
+
+void f54(void) {
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) + double_of_bits(0xbff00000, 0x00000000), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep+1023 + -0x1p+0 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x7fefffff, 0xfffffffe), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // -0x1p+0 + 0x1.ffffffffffffep+1023 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) + double_of_bits(0x3ff00000, 0x00000000), 0xffefffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep+1023 + 0x1p+0 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xffefffff, 0xfffffffe), 0xffefffff, 0xfffffffe, STR(__LINE__)); // 0x1p+0 + -0x1.ffffffffffffep+1023 = -0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + -0x0.0000000000001p-1022 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x7fefffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x1.fffffffffffffp+1023 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + 0x0.0000000000001p-1022 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0xffefffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x1.fffffffffffffp+1023 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x7fe00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 + 0x1p+1023 = 0x1p+1023
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -0x0.0000000000003p-1022 = 0x1p+1023
+}
+
+void f55(void) {
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0xffe00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 + -0x1p+1023 = -0x1p+1023
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + 0x0.0000000000003p-1022 = -0x1p+1023
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0x3fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp-1 + -0x0.0000000000001p-1022 = 0x1.fffffffffffffp-1
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x3fefffff, 0xffffffff), 0x3fefffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x1.fffffffffffffp-1 = 0x1.fffffffffffffp-1
+ comp64(double_of_bits(0xbfffffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0xbfffffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+0 + 0x0.0000000000001p-1022 = -0x1.fffffffffffffp+0
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0xbfffffff, 0xffffffff), 0xbfffffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x1.fffffffffffffp+0 = -0x1.fffffffffffffp+0
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x40080000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 + 0x1.8p+1 = 0x1.8p+1
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0x80000000, 0x00000003), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 + -0x0.0000000000003p-1022 = 0x1.8p+1
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0xc0140000, 0x00000000), 0xc0140000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 + -0x1.4p+2 = -0x1.4p+2
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0x00000000, 0x00000003), 0xc0140000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 + 0x0.0000000000003p-1022 = -0x1.4p+2
+}
+
+void f56(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1p+1 = 0x0p+0
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1p+1 = 0x0p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0xc0140000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 + -0x1.4p+2 = 0x0p+0
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 + 0x1.4p+2 = 0x0p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -0x1p+1023 = 0x0p+0
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + 0x1p+1023 = 0x0p+0
+ comp64(double_of_bits(0xffdfffff, 0xfffffffe) + double_of_bits(0x7fdfffff, 0xfffffffe), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1022 + 0x1.ffffffffffffep+1022 = 0x0p+0
+ comp64(double_of_bits(0x7fdfffff, 0xfffffffe) + double_of_bits(0xffdfffff, 0xfffffffe), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1022 + -0x1.ffffffffffffep+1022 = 0x0p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1p+0 = 0x0p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1p+0 = 0x0p+0
+}
+
+void f57(void) {
+ comp64(double_of_bits(0xc0080000, 0x00000000) + double_of_bits(0x40080000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 + 0x1.8p+1 = 0x0p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0xc0080000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 + -0x1.8p+1 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) + double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 + -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) + double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 + 0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xfffffffc) + double_of_bits(0x800fffff, 0xfffffffc), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffffcp-1022 + -0x0.ffffffffffffcp-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xfffffffc) + double_of_bits(0x000fffff, 0xfffffffc), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffffcp-1022 + 0x0.ffffffffffffcp-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x000fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + 0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x800fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + -0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x0.0000000000001p-1022 = 0x0p+0
+}
+
+void f58(void) {
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0xffefffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + -0x1.fffffffffffffp+1023 = 0x0p+0
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0x7fefffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + 0x1.fffffffffffffp+1023 = 0x0p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0xbff00000, 0x00000000), 0x3cb00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + -0x1p+0 = 0x1p-52
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x3cb00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1.0000000000001p+0 = 0x1p-52
+ comp64(double_of_bits(0x7fe00000, 0x00000001) + double_of_bits(0xffe00000, 0x00000000), 0x7ca00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1023 + -0x1p+1023 = 0x1p+971
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x7fe00000, 0x00000001), 0x7ca00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + 0x1.0000000000001p+1023 = 0x1p+971
+ comp64(double_of_bits(0x00100000, 0x00000001) + double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 + -0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) + double_of_bits(0x00100000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 + 0x1.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0x40000000, 0x00000001), 0x3cc00000, 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1.0000000000001p+1 = 0x1p-51
+ comp64(double_of_bits(0x40000000, 0x00000001) + double_of_bits(0xc0000000, 0x00000000), 0x3cc00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1 + -0x1p+1 = 0x1p-51
+}
+
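The exact cancellations in f58, continued in f59 through f64 below, follow from Sterbenz's lemma: when two floating-point values of the same sign are within a factor of two of each other, their difference is itself representable, so the subtraction incurs no rounding. For example, 0x1.0000000000001p+0 + -0x1p+0 is exactly (1 + 2^-52) - 1 = 2^-52 = 0x1p-52, and 0x1.0000000000001p+1023 + -0x1p+1023 is exactly 2^-52 * 2^1023 = 0x1p+971; that is why the expected results are single ulps of the larger operand's binade, down to one subnormal ulp (0x0.0000000000001p-1022) in the p-1022 cases.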
+void f59(void) {
+ comp64(double_of_bits(0xffd00000, 0x00000000) + double_of_bits(0x7fd00000, 0x00000001), 0x7c900000, 0x00000000, STR(__LINE__)); // -0x1p+1022 + 0x1.0000000000001p+1022 = 0x1p+970
+ comp64(double_of_bits(0x7fd00000, 0x00000001) + double_of_bits(0xffd00000, 0x00000000), 0x7c900000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1022 + -0x1p+1022 = 0x1p+970
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0x3ff00000, 0x00000000), 0xbcb00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + 0x1p+0 = -0x1p-52
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000001), 0xbcb00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1.0000000000001p+0 = -0x1p-52
+ comp64(double_of_bits(0xffe00000, 0x00000001) + double_of_bits(0x7fe00000, 0x00000000), 0xfca00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 + 0x1p+1023 = -0x1p+971
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0xffe00000, 0x00000001), 0xfca00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -0x1.0000000000001p+1023 = -0x1p+971
+ comp64(double_of_bits(0x80100000, 0x00000001) + double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 + 0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) + double_of_bits(0x80100000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 + -0x1.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0xc0000000, 0x00000001), 0xbcc00000, 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1.0000000000001p+1 = -0x1p-51
+ comp64(double_of_bits(0xc0000000, 0x00000001) + double_of_bits(0x40000000, 0x00000000), 0xbcc00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1 + 0x1p+1 = -0x1p-51
+}
+
+void f60(void) {
+ comp64(double_of_bits(0x7fd00000, 0x00000000) + double_of_bits(0xffd00000, 0x00000001), 0xfc900000, 0x00000000, STR(__LINE__)); // 0x1p+1022 + -0x1.0000000000001p+1022 = -0x1p+970
+ comp64(double_of_bits(0xffd00000, 0x00000001) + double_of_bits(0x7fd00000, 0x00000000), 0xfc900000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1022 + 0x1p+1022 = -0x1p+970
+ comp64(double_of_bits(0x00000000, 0x00000002) + double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000002p-1022 + -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x00000000, 0x00000002), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 + 0x0.0000000000002p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0x3ff00000, 0x00000002), 0x3cb00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + 0x1.0000000000002p+0 = 0x1p-52
+ comp64(double_of_bits(0x3ff00000, 0x00000002) + double_of_bits(0xbff00000, 0x00000001), 0x3cb00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+0 + -0x1.0000000000001p+0 = 0x1p-52
+ comp64(double_of_bits(0xffe00000, 0x00000001) + double_of_bits(0x7fe00000, 0x00000002), 0x7ca00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 + 0x1.0000000000002p+1023 = 0x1p+971
+ comp64(double_of_bits(0x7fe00000, 0x00000002) + double_of_bits(0xffe00000, 0x00000001), 0x7ca00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+1023 + -0x1.0000000000001p+1023 = 0x1p+971
+ comp64(double_of_bits(0x80100000, 0x00000001) + double_of_bits(0x00100000, 0x00000002), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 + 0x1.0000000000002p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000002) + double_of_bits(0x80100000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000002p-1022 + -0x1.0000000000001p-1022 = 0x0.0000000000001p-1022
+}
+
+void f61(void) {
+ comp64(double_of_bits(0x80000000, 0x00000002) + double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000002p-1022 + 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 + -0x0.0000000000002p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0xbff00000, 0x00000002), 0xbcb00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + -0x1.0000000000002p+0 = -0x1p-52
+ comp64(double_of_bits(0xbff00000, 0x00000002) + double_of_bits(0x3ff00000, 0x00000001), 0xbcb00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+0 + 0x1.0000000000001p+0 = -0x1p-52
+ comp64(double_of_bits(0x7fe00000, 0x00000001) + double_of_bits(0xffe00000, 0x00000002), 0xfca00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1023 + -0x1.0000000000002p+1023 = -0x1p+971
+ comp64(double_of_bits(0xffe00000, 0x00000002) + double_of_bits(0x7fe00000, 0x00000001), 0xfca00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+1023 + 0x1.0000000000001p+1023 = -0x1p+971
+ comp64(double_of_bits(0x00100000, 0x00000001) + double_of_bits(0x80100000, 0x00000002), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 + -0x1.0000000000002p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000002) + double_of_bits(0x00100000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000002p-1022 + 0x1.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) + double_of_bits(0x00000000, 0x00000002), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000003p-1022 + 0x0.0000000000002p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) + double_of_bits(0x80000000, 0x00000003), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000002p-1022 + -0x0.0000000000003p-1022 = -0x0.0000000000001p-1022
+}
+
+void f62(void) {
+ comp64(double_of_bits(0x00000000, 0x00000003) + double_of_bits(0x80000000, 0x00000002), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000003p-1022 + -0x0.0000000000002p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) + double_of_bits(0x00000000, 0x00000003), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000002p-1022 + 0x0.0000000000003p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x40000000, 0x00000004) + double_of_bits(0xc0000000, 0x00000003), 0x3cc00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000004p+1 + -0x1.0000000000003p+1 = 0x1p-51
+ comp64(double_of_bits(0xc0000000, 0x00000003) + double_of_bits(0x40000000, 0x00000004), 0x3cc00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000003p+1 + 0x1.0000000000004p+1 = 0x1p-51
+ comp64(double_of_bits(0x7fd00000, 0x00000004) + double_of_bits(0xffd00000, 0x00000003), 0x7c900000, 0x00000000, STR(__LINE__)); // 0x1.0000000000004p+1022 + -0x1.0000000000003p+1022 = 0x1p+970
+ comp64(double_of_bits(0xffd00000, 0x00000003) + double_of_bits(0x7fd00000, 0x00000004), 0x7c900000, 0x00000000, STR(__LINE__)); // -0x1.0000000000003p+1022 + 0x1.0000000000004p+1022 = 0x1p+970
+ comp64(double_of_bits(0xc0000000, 0x00000004) + double_of_bits(0x40000000, 0x00000003), 0xbcc00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000004p+1 + 0x1.0000000000003p+1 = -0x1p-51
+ comp64(double_of_bits(0x40000000, 0x00000003) + double_of_bits(0xc0000000, 0x00000004), 0xbcc00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p+1 + -0x1.0000000000004p+1 = -0x1p-51
+ comp64(double_of_bits(0xffd00000, 0x00000004) + double_of_bits(0x7fd00000, 0x00000003), 0xfc900000, 0x00000000, STR(__LINE__)); // -0x1.0000000000004p+1022 + 0x1.0000000000003p+1022 = -0x1p+970
+ comp64(double_of_bits(0x7fd00000, 0x00000003) + double_of_bits(0xffd00000, 0x00000004), 0xfc900000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p+1022 + -0x1.0000000000004p+1022 = -0x1p+970
+}
+
+void f63(void) {
+ comp64(double_of_bits(0x400fffff, 0xffffffff) + double_of_bits(0xc00fffff, 0xfffffffe), 0x3cc00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1 + -0x1.ffffffffffffep+1 = 0x1p-51
+ comp64(double_of_bits(0xc00fffff, 0xfffffffe) + double_of_bits(0x400fffff, 0xffffffff), 0x3cc00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1 + 0x1.fffffffffffffp+1 = 0x1p-51
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) + double_of_bits(0xffcfffff, 0xfffffffe), 0x7c800000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 + -0x1.ffffffffffffep+1021 = 0x1p+969
+ comp64(double_of_bits(0xffcfffff, 0xfffffffe) + double_of_bits(0x7fcfffff, 0xffffffff), 0x7c800000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1021 + 0x1.fffffffffffffp+1021 = 0x1p+969
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x800fffff, 0xfffffffe), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + -0x0.ffffffffffffep-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) + double_of_bits(0x000fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.ffffffffffffep-1022 + 0x0.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) + double_of_bits(0x7fefffff, 0xffffffff), 0x7ca00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1023 + 0x1.fffffffffffffp+1023 = 0x1p+971
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) + double_of_bits(0xffefffff, 0xfffffffe), 0x7ca00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 + -0x1.ffffffffffffep+1023 = 0x1p+971
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) + double_of_bits(0x400fffff, 0xfffffffe), 0xbcc00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1 + 0x1.ffffffffffffep+1 = -0x1p-51
+ comp64(double_of_bits(0x400fffff, 0xfffffffe) + double_of_bits(0xc00fffff, 0xffffffff), 0xbcc00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1 + -0x1.fffffffffffffp+1 = -0x1p-51
+}
+
+void f64(void) {
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) + double_of_bits(0x7fcfffff, 0xfffffffe), 0xfc800000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 + 0x1.ffffffffffffep+1021 = -0x1p+969
+ comp64(double_of_bits(0x7fcfffff, 0xfffffffe) + double_of_bits(0xffcfffff, 0xffffffff), 0xfc800000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1021 + -0x1.fffffffffffffp+1021 = -0x1p+969
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x000fffff, 0xfffffffe), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + 0x0.ffffffffffffep-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) + double_of_bits(0x800fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.ffffffffffffep-1022 + -0x0.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) + double_of_bits(0xffefffff, 0xffffffff), 0xfca00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1023 + -0x1.fffffffffffffp+1023 = -0x1p+971
+ comp64(double_of_bits(0xffefffff, 0xffffffff) + double_of_bits(0x7fefffff, 0xfffffffe), 0xfca00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 + 0x1.ffffffffffffep+1023 = -0x1p+971
+ comp64(double_of_bits(0x000fffff, 0xfffffffd) + double_of_bits(0x800fffff, 0xfffffffe), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.ffffffffffffdp-1022 + -0x0.ffffffffffffep-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) + double_of_bits(0x000fffff, 0xfffffffd), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.ffffffffffffep-1022 + 0x0.ffffffffffffdp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffd) + double_of_bits(0x000fffff, 0xfffffffe), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.ffffffffffffdp-1022 + 0x0.ffffffffffffep-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) + double_of_bits(0x800fffff, 0xfffffffd), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.ffffffffffffep-1022 + -0x0.ffffffffffffdp-1022 = 0x0.0000000000001p-1022
+}
+
+void f65(void) {
+ comp64(double_of_bits(0x3fffffff, 0xfffffffc) + double_of_bits(0xbfffffff, 0xfffffffd), 0xbcb00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp+0 + -0x1.ffffffffffffdp+0 = -0x1p-52
+ comp64(double_of_bits(0xbfffffff, 0xfffffffd) + double_of_bits(0x3fffffff, 0xfffffffc), 0xbcb00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffdp+0 + 0x1.ffffffffffffcp+0 = -0x1p-52
+ comp64(double_of_bits(0xbfffffff, 0xfffffffc) + double_of_bits(0x3fffffff, 0xfffffffd), 0x3cb00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp+0 + 0x1.ffffffffffffdp+0 = 0x1p-52
+ comp64(double_of_bits(0x3fffffff, 0xfffffffd) + double_of_bits(0xbfffffff, 0xfffffffc), 0x3cb00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffdp+0 + -0x1.ffffffffffffcp+0 = 0x1p-52
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + -0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) + double_of_bits(0x000fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 + 0x0.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fffffff, 0xffffffff) + double_of_bits(0xc0000000, 0x00000000), 0xbcb00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+0 + -0x1p+1 = -0x1p-52
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0x3fffffff, 0xffffffff), 0xbcb00000, 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1.fffffffffffffp+0 = -0x1p-52
+ comp64(double_of_bits(0x002fffff, 0xffffffff) + double_of_bits(0x80300000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x1.fffffffffffffp-1021 + -0x1p-1020 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80300000, 0x00000000) + double_of_bits(0x002fffff, 0xffffffff), 0x80000000, 0x00000002, STR(__LINE__)); // -0x1p-1020 + 0x1.fffffffffffffp-1021 = -0x0.0000000000002p-1022
+}
+
+void f66(void) {
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) + double_of_bits(0xffe00000, 0x00000000), 0xfc900000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 + -0x1p+1023 = -0x1p+970
+ comp64(double_of_bits(0xffe00000, 0x00000000) + double_of_bits(0x7fdfffff, 0xffffffff), 0xfc900000, 0x00000000, STR(__LINE__)); // -0x1p+1023 + 0x1.fffffffffffffp+1022 = -0x1p+970
+ comp64(double_of_bits(0x001fffff, 0xffffffff) + double_of_bits(0x80200000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 + -0x1p-1021 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80200000, 0x00000000) + double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p-1021 + 0x1.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xffd00000, 0x00000000) + double_of_bits(0x7fcfffff, 0xffffffff), 0xfc800000, 0x00000000, STR(__LINE__)); // -0x1p+1022 + 0x1.fffffffffffffp+1021 = -0x1p+969
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) + double_of_bits(0xffd00000, 0x00000000), 0xfc800000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 + -0x1p+1022 = -0x1p+969
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0xbfffffff, 0xffffffff), 0x3cb00000, 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1.fffffffffffffp+0 = 0x1p-52
+ comp64(double_of_bits(0xbfffffff, 0xffffffff) + double_of_bits(0x40000000, 0x00000000), 0x3cb00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+0 + 0x1p+1 = 0x1p-52
+ comp64(double_of_bits(0x7fd00000, 0x00000000) + double_of_bits(0xffcfffff, 0xffffffff), 0x7c800000, 0x00000000, STR(__LINE__)); // 0x1p+1022 + -0x1.fffffffffffffp+1021 = 0x1p+969
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) + double_of_bits(0x7fd00000, 0x00000000), 0x7c800000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 + 0x1p+1022 = 0x1p+969
+}
+
+void f67(void) {
+ comp64(double_of_bits(0x00200000, 0x00000000) + double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p-1021 + -0x1.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) + double_of_bits(0x00200000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 + 0x1p-1021 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00300000, 0x00000000) + double_of_bits(0x802fffff, 0xffffffff), 0x00000000, 0x00000002, STR(__LINE__)); // 0x1p-1020 + -0x1.fffffffffffffp-1021 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x802fffff, 0xffffffff) + double_of_bits(0x00300000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x1.fffffffffffffp-1021 + 0x1p-1020 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + 0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) + double_of_bits(0x800fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 + -0x0.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) + double_of_bits(0x7fe00000, 0x00000000), 0x7c900000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 + 0x1p+1023 = 0x1p+970
+ comp64(double_of_bits(0x7fe00000, 0x00000000) + double_of_bits(0xffdfffff, 0xffffffff), 0x7c900000, 0x00000000, STR(__LINE__)); // 0x1p+1023 + -0x1.fffffffffffffp+1022 = 0x1p+970
+ comp64(double_of_bits(0x400fffff, 0xffffffff) + double_of_bits(0xc0100000, 0x00000001), 0xbcd80000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1 + -0x1.0000000000001p+2 = -0x1.8p-50
+ comp64(double_of_bits(0xc0100000, 0x00000001) + double_of_bits(0x400fffff, 0xffffffff), 0xbcd80000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+2 + 0x1.fffffffffffffp+1 = -0x1.8p-50
+}
+
+void f68(void) {
+ comp64(double_of_bits(0xffb00000, 0x00000001) + double_of_bits(0x7fafffff, 0xffffffff), 0xfc780000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1020 + 0x1.fffffffffffffp+1019 = -0x1.8p+968
+ comp64(double_of_bits(0x7fafffff, 0xffffffff) + double_of_bits(0xffb00000, 0x00000001), 0xfc780000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1019 + -0x1.0000000000001p+1020 = -0x1.8p+968
+ comp64(double_of_bits(0x80200000, 0x00000001) + double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p-1021 + 0x1.fffffffffffffp-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) + double_of_bits(0x80200000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // 0x1.fffffffffffffp-1022 + -0x1.0000000000001p-1021 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80300000, 0x00000001) + double_of_bits(0x002fffff, 0xffffffff), 0x80000000, 0x00000006, STR(__LINE__)); // -0x1.0000000000001p-1020 + 0x1.fffffffffffffp-1021 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0x002fffff, 0xffffffff) + double_of_bits(0x80300000, 0x00000001), 0x80000000, 0x00000006, STR(__LINE__)); // 0x1.fffffffffffffp-1021 + -0x1.0000000000001p-1020 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) + double_of_bits(0x40100000, 0x00000001), 0x3cd80000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1 + 0x1.0000000000001p+2 = 0x1.8p-50
+ comp64(double_of_bits(0x40100000, 0x00000001) + double_of_bits(0xc00fffff, 0xffffffff), 0x3cd80000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+2 + -0x1.fffffffffffffp+1 = 0x1.8p-50
+ comp64(double_of_bits(0x7fb00000, 0x00000001) + double_of_bits(0xffafffff, 0xffffffff), 0x7c780000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1020 + -0x1.fffffffffffffp+1019 = 0x1.8p+968
+ comp64(double_of_bits(0xffafffff, 0xffffffff) + double_of_bits(0x7fb00000, 0x00000001), 0x7c780000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1019 + 0x1.0000000000001p+1020 = 0x1.8p+968
+}
+
+void f69(void) {
+ comp64(double_of_bits(0x00200000, 0x00000001) + double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p-1021 + -0x1.fffffffffffffp-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) + double_of_bits(0x00200000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // -0x1.fffffffffffffp-1022 + 0x1.0000000000001p-1021 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00300000, 0x00000001) + double_of_bits(0x802fffff, 0xffffffff), 0x00000000, 0x00000006, STR(__LINE__)); // 0x1.0000000000001p-1020 + -0x1.fffffffffffffp-1021 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x802fffff, 0xffffffff) + double_of_bits(0x00300000, 0x00000001), 0x00000000, 0x00000006, STR(__LINE__)); // -0x1.fffffffffffffp-1021 + 0x1.0000000000001p-1020 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x400fffff, 0xffffffff) + double_of_bits(0xc0100000, 0x00000002), 0xbce40000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1 + -0x1.0000000000002p+2 = -0x1.4p-49
+ comp64(double_of_bits(0xc0100000, 0x00000002) + double_of_bits(0x400fffff, 0xffffffff), 0xbce40000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+2 + 0x1.fffffffffffffp+1 = -0x1.4p-49
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) + double_of_bits(0xffd00000, 0x00000002), 0xfca40000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 + -0x1.0000000000002p+1022 = -0x1.4p+971
+ comp64(double_of_bits(0xffd00000, 0x00000002) + double_of_bits(0x7fcfffff, 0xffffffff), 0xfca40000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+1022 + 0x1.fffffffffffffp+1021 = -0x1.4p+971
+ comp64(double_of_bits(0x001fffff, 0xffffffff) + double_of_bits(0x80200000, 0x00000002), 0x80000000, 0x00000005, STR(__LINE__)); // 0x1.fffffffffffffp-1022 + -0x1.0000000000002p-1021 = -0x0.0000000000005p-1022
+ comp64(double_of_bits(0x80200000, 0x00000002) + double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000005, STR(__LINE__)); // -0x1.0000000000002p-1021 + 0x1.fffffffffffffp-1022 = -0x0.0000000000005p-1022
+}
+
+void f70(void) {
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) + double_of_bits(0x40100000, 0x00000002), 0x3ce40000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1 + 0x1.0000000000002p+2 = 0x1.4p-49
+ comp64(double_of_bits(0x40100000, 0x00000002) + double_of_bits(0xc00fffff, 0xffffffff), 0x3ce40000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+2 + -0x1.fffffffffffffp+1 = 0x1.4p-49
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) + double_of_bits(0x7fd00000, 0x00000002), 0x7ca40000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 + 0x1.0000000000002p+1022 = 0x1.4p+971
+ comp64(double_of_bits(0x7fd00000, 0x00000002) + double_of_bits(0xffcfffff, 0xffffffff), 0x7ca40000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+1022 + -0x1.fffffffffffffp+1021 = 0x1.4p+971
+ comp64(double_of_bits(0x801fffff, 0xffffffff) + double_of_bits(0x00200000, 0x00000002), 0x00000000, 0x00000005, STR(__LINE__)); // -0x1.fffffffffffffp-1022 + 0x1.0000000000002p-1021 = 0x0.0000000000005p-1022
+ comp64(double_of_bits(0x00200000, 0x00000002) + double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000005, STR(__LINE__)); // 0x1.0000000000002p-1021 + -0x1.fffffffffffffp-1022 = 0x0.0000000000005p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) + double_of_bits(0x80200000, 0x00000004), 0x80000000, 0x00000009, STR(__LINE__)); // 0x1.fffffffffffffp-1022 + -0x1.0000000000004p-1021 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x80200000, 0x00000004) + double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000009, STR(__LINE__)); // -0x1.0000000000004p-1021 + 0x1.fffffffffffffp-1022 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) + double_of_bits(0x00200000, 0x00000004), 0x00000000, 0x00000009, STR(__LINE__)); // -0x1.fffffffffffffp-1022 + 0x1.0000000000004p-1021 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x00200000, 0x00000004) + double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000009, STR(__LINE__)); // 0x1.0000000000004p-1021 + -0x1.fffffffffffffp-1022 = 0x0.0000000000009p-1022
+}
+
+void f71(void) {
+ comp64(double_of_bits(0x40000000, 0x00000001) + double_of_bits(0xbff00000, 0x00000001), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1 + -0x1.0000000000001p+0 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0x40000000, 0x00000001), 0x3ff00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+0 + 0x1.0000000000001p+1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x00200000, 0x00000001) + double_of_bits(0x80100000, 0x00000001), 0x00100000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1021 + -0x1.0000000000001p-1022 = 0x1.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) + double_of_bits(0x00200000, 0x00000001), 0x00100000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 + 0x1.0000000000001p-1021 = 0x1.0000000000001p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000001) + double_of_bits(0x3ff00000, 0x00000001), 0xbff00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1 + 0x1.0000000000001p+0 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0xc0000000, 0x00000001), 0xbff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+0 + -0x1.0000000000001p+1 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x80200000, 0x00000001) + double_of_bits(0x00100000, 0x00000001), 0x80100000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1021 + 0x1.0000000000001p-1022 = -0x1.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) + double_of_bits(0x80200000, 0x00000001), 0x80100000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 + -0x1.0000000000001p-1021 = -0x1.0000000000001p-1022
+ comp64(double_of_bits(0xffd00000, 0x00000001) + double_of_bits(0x7fe00000, 0x00000001), 0x7fd00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1022 + 0x1.0000000000001p+1023 = 0x1.0000000000001p+1022
+ comp64(double_of_bits(0x7fe00000, 0x00000001) + double_of_bits(0xffd00000, 0x00000001), 0x7fd00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1023 + -0x1.0000000000001p+1022 = 0x1.0000000000001p+1022
+}
+
+void f72(void) {
+ comp64(double_of_bits(0x7fd00000, 0x00000001) + double_of_bits(0xffe00000, 0x00000001), 0xffd00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1022 + -0x1.0000000000001p+1023 = -0x1.0000000000001p+1022
+ comp64(double_of_bits(0xffe00000, 0x00000001) + double_of_bits(0x7fd00000, 0x00000001), 0xffd00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1023 + 0x1.0000000000001p+1022 = -0x1.0000000000001p+1022
+ comp64(double_of_bits(0x40000000, 0x00000002) + double_of_bits(0xbff00000, 0x00000001), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p+1 + -0x1.0000000000001p+0 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0x40000000, 0x00000002), 0x3ff00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 + 0x1.0000000000002p+1 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000002) + double_of_bits(0xffd00000, 0x00000001), 0x7fd00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p+1023 + -0x1.0000000000001p+1022 = 0x1.0000000000003p+1022
+ comp64(double_of_bits(0xffd00000, 0x00000001) + double_of_bits(0x7fe00000, 0x00000002), 0x7fd00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+1022 + 0x1.0000000000002p+1023 = 0x1.0000000000003p+1022
+ comp64(double_of_bits(0x00200000, 0x00000002) + double_of_bits(0x80100000, 0x00000001), 0x00100000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p-1021 + -0x1.0000000000001p-1022 = 0x1.0000000000003p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) + double_of_bits(0x00200000, 0x00000002), 0x00100000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p-1022 + 0x1.0000000000002p-1021 = 0x1.0000000000003p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000002) + double_of_bits(0x3ff00000, 0x00000001), 0xbff00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000002p+1 + 0x1.0000000000001p+0 = -0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0xc0000000, 0x00000002), 0xbff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 + -0x1.0000000000002p+1 = -0x1.0000000000003p+0
+}
+
+void f73(void) {
+ comp64(double_of_bits(0xffe00000, 0x00000002) + double_of_bits(0x7fd00000, 0x00000001), 0xffd00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000002p+1023 + 0x1.0000000000001p+1022 = -0x1.0000000000003p+1022
+ comp64(double_of_bits(0x7fd00000, 0x00000001) + double_of_bits(0xffe00000, 0x00000002), 0xffd00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+1022 + -0x1.0000000000002p+1023 = -0x1.0000000000003p+1022
+ comp64(double_of_bits(0x80200000, 0x00000002) + double_of_bits(0x00100000, 0x00000001), 0x80100000, 0x00000003, STR(__LINE__)); // -0x1.0000000000002p-1021 + 0x1.0000000000001p-1022 = -0x1.0000000000003p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) + double_of_bits(0x80200000, 0x00000002), 0x80100000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p-1022 + -0x1.0000000000002p-1021 = -0x1.0000000000003p-1022
+ comp64(double_of_bits(0x40000000, 0x00000002) + double_of_bits(0xbff00000, 0x00000003), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000002p+1 + -0x1.0000000000003p+0 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0xbff00000, 0x00000003) + double_of_bits(0x40000000, 0x00000002), 0x3ff00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000003p+0 + 0x1.0000000000002p+1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x7fd00000, 0x00000002) + double_of_bits(0xffc00000, 0x00000003), 0x7fc00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000002p+1022 + -0x1.0000000000003p+1021 = 0x1.0000000000001p+1021
+ comp64(double_of_bits(0xffc00000, 0x00000003) + double_of_bits(0x7fd00000, 0x00000002), 0x7fc00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000003p+1021 + 0x1.0000000000002p+1022 = 0x1.0000000000001p+1021
+ comp64(double_of_bits(0x00300000, 0x00000002) + double_of_bits(0x80200000, 0x00000003), 0x00200000, 0x00000001, STR(__LINE__)); // 0x1.0000000000002p-1020 + -0x1.0000000000003p-1021 = 0x1.0000000000001p-1021
+ comp64(double_of_bits(0x80200000, 0x00000003) + double_of_bits(0x00300000, 0x00000002), 0x00200000, 0x00000001, STR(__LINE__)); // -0x1.0000000000003p-1021 + 0x1.0000000000002p-1020 = 0x1.0000000000001p-1021
+}
+
+void f74(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000002) + double_of_bits(0x3ff00000, 0x00000003), 0xbff00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000002p+1 + 0x1.0000000000003p+0 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000003) + double_of_bits(0xc0000000, 0x00000002), 0xbff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000003p+0 + -0x1.0000000000002p+1 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0xffd00000, 0x00000002) + double_of_bits(0x7fc00000, 0x00000003), 0xffc00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000002p+1022 + 0x1.0000000000003p+1021 = -0x1.0000000000001p+1021
+ comp64(double_of_bits(0x7fc00000, 0x00000003) + double_of_bits(0xffd00000, 0x00000002), 0xffc00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000003p+1021 + -0x1.0000000000002p+1022 = -0x1.0000000000001p+1021
+ comp64(double_of_bits(0x80300000, 0x00000002) + double_of_bits(0x00200000, 0x00000003), 0x80200000, 0x00000001, STR(__LINE__)); // -0x1.0000000000002p-1020 + 0x1.0000000000003p-1021 = -0x1.0000000000001p-1021
+ comp64(double_of_bits(0x00200000, 0x00000003) + double_of_bits(0x80300000, 0x00000002), 0x80200000, 0x00000001, STR(__LINE__)); // 0x1.0000000000003p-1021 + -0x1.0000000000002p-1020 = -0x1.0000000000001p-1021
+ comp64(double_of_bits(0x3ff00000, 0x00000002) + double_of_bits(0xbff00000, 0x00000000), 0x3cc00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+0 + -0x1p+0 = 0x1p-51
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000002), 0x3cc00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1.0000000000002p+0 = 0x1p-51
+ comp64(double_of_bits(0xbff00000, 0x00000002) + double_of_bits(0x3ff00000, 0x00000000), 0xbcc00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+0 + 0x1p+0 = -0x1p-51
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000002), 0xbcc00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1.0000000000002p+0 = -0x1p-51
+}
+
+void f75(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000004) + double_of_bits(0xbff00000, 0x00000000), 0x3cd00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000004p+0 + -0x1p+0 = 0x1p-50
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000004), 0x3cd00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1.0000000000004p+0 = 0x1p-50
+ comp64(double_of_bits(0xbff00000, 0x00000004) + double_of_bits(0x3ff00000, 0x00000000), 0xbcd00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000004p+0 + 0x1p+0 = -0x1p-50
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000004), 0xbcd00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1.0000000000004p+0 = -0x1p-50
+ comp64(double_of_bits(0x3ff00000, 0x00000008) + double_of_bits(0xbff00000, 0x00000000), 0x3ce00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000008p+0 + -0x1p+0 = 0x1p-49
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000008), 0x3ce00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1.0000000000008p+0 = 0x1p-49
+ comp64(double_of_bits(0xbff00000, 0x00000008) + double_of_bits(0x3ff00000, 0x00000000), 0xbce00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000008p+0 + 0x1p+0 = -0x1p-49
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0xbff00000, 0x00000008), 0xbce00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1.0000000000008p+0 = -0x1p-49
+ comp64(double_of_bits(0x40310000, 0x00000000) + double_of_bits(0xc0300000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.1p+4 + -0x1p+4 = 0x1p+0
+ comp64(double_of_bits(0xc0300000, 0x00000000) + double_of_bits(0x40310000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p+4 + 0x1.1p+4 = 0x1p+0
+}
+
+void f76(void) {
+ comp64(double_of_bits(0xc0310000, 0x00000000) + double_of_bits(0x40300000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.1p+4 + 0x1p+4 = -0x1p+0
+ comp64(double_of_bits(0x40300000, 0x00000000) + double_of_bits(0xc0310000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+4 + -0x1.1p+4 = -0x1p+0
+ comp64(double_of_bits(0x40300000, 0x00000000) + double_of_bits(0xc0310000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+4 + -0x1.1p+4 = -0x1p+0
+ comp64(double_of_bits(0xc0310000, 0x00000000) + double_of_bits(0x40300000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.1p+4 + 0x1p+4 = -0x1p+0
+ comp64(double_of_bits(0x40220000, 0x00000000) + double_of_bits(0xc0200000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.2p+3 + -0x1p+3 = 0x1p+0
+ comp64(double_of_bits(0xc0200000, 0x00000000) + double_of_bits(0x40220000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p+3 + 0x1.2p+3 = 0x1p+0
+ comp64(double_of_bits(0xc0220000, 0x00000000) + double_of_bits(0x40200000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.2p+3 + 0x1p+3 = -0x1p+0
+ comp64(double_of_bits(0x40200000, 0x00000000) + double_of_bits(0xc0220000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+3 + -0x1.2p+3 = -0x1p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) + double_of_bits(0xc0100000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 + -0x1p+2 = 0x1p+0
+ comp64(double_of_bits(0xc0100000, 0x00000000) + double_of_bits(0x40140000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p+2 + 0x1.4p+2 = 0x1p+0
+}
+
+void f77(void) {
+ comp64(double_of_bits(0xc0140000, 0x00000000) + double_of_bits(0x40100000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 + 0x1p+2 = -0x1p+0
+ comp64(double_of_bits(0x40100000, 0x00000000) + double_of_bits(0xc0140000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+2 + -0x1.4p+2 = -0x1p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 + -0x1p+1 = 0x1p+0
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0x40080000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1.8p+1 = 0x1p+0
+ comp64(double_of_bits(0xc0080000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 + 0x1p+1 = -0x1p+0
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0xc0080000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1.8p+1 = -0x1p+0
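+ // Same-sign additions from here to f82: the smaller operand's last
+ // significand bit lands at or below half an ulp of the sum and is removed by
+ // round-to-nearest-even; in the 0x1p+1 + 0x1.0000000000001p+0 pairs the
+ // discarded bit is exactly half an ulp and the tie goes to the even
+ // significand, 0x1.8p+1.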
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1.0000000000001p+0 = 0x1.8p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40000000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+1 = 0x1.8p+1
+ comp64(double_of_bits(0x40240000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40260000, 0x00000000, STR(__LINE__)); // 0x1.4p+3 + 0x1.0000000000001p+0 = 0x1.6p+3
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40240000, 0x00000000), 0x40260000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.4p+3 = 0x1.6p+3
+}
+
+void f78(void) {
+ comp64(double_of_bits(0x40330000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40340000, 0x00000000, STR(__LINE__)); // 0x1.3p+4 + 0x1.0000000000001p+0 = 0x1.4p+4
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40330000, 0x00000000), 0x40340000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.3p+4 = 0x1.4p+4
+ comp64(double_of_bits(0x40400000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40408000, 0x00000000, STR(__LINE__)); // 0x1p+5 + 0x1.0000000000001p+0 = 0x1.08p+5
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40400000, 0x00000000), 0x40408000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+5 = 0x1.08p+5
+ comp64(double_of_bits(0x40504000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40508000, 0x00000000, STR(__LINE__)); // 0x1.04p+6 + 0x1.0000000000001p+0 = 0x1.08p+6
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40504000, 0x00000000), 0x40508000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.04p+6 = 0x1.08p+6
+ comp64(double_of_bits(0x4060a000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x4060c000, 0x00000000, STR(__LINE__)); // 0x1.0ap+7 + 0x1.0000000000001p+0 = 0x1.0cp+7
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x4060a000, 0x00000000), 0x4060c000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0ap+7 = 0x1.0cp+7
+ comp64(double_of_bits(0x40704000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40705000, 0x00000000, STR(__LINE__)); // 0x1.04p+8 + 0x1.0000000000001p+0 = 0x1.05p+8
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40704000, 0x00000000), 0x40705000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.04p+8 = 0x1.05p+8
+}
+
+void f79(void) {
+ comp64(double_of_bits(0x4080a800, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x4080b000, 0x00000000, STR(__LINE__)); // 0x1.0a8p+9 + 0x1.0000000000001p+0 = 0x1.0bp+9
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x4080a800, 0x00000000), 0x4080b000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0a8p+9 = 0x1.0bp+9
+ comp64(double_of_bits(0x40901400, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40901800, 0x00000000, STR(__LINE__)); // 0x1.014p+10 + 0x1.0000000000001p+0 = 0x1.018p+10
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40901400, 0x00000000), 0x40901800, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.014p+10 = 0x1.018p+10
+ comp64(double_of_bits(0x40a00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40a00200, 0x00000000, STR(__LINE__)); // 0x1p+11 + 0x1.0000000000001p+0 = 0x1.002p+11
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40a00000, 0x00000000), 0x40a00200, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+11 = 0x1.002p+11
+ comp64(double_of_bits(0x40b00100, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40b00200, 0x00000000, STR(__LINE__)); // 0x1.001p+12 + 0x1.0000000000001p+0 = 0x1.002p+12
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40b00100, 0x00000000), 0x40b00200, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.001p+12 = 0x1.002p+12
+ comp64(double_of_bits(0x40c00080, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40c00100, 0x00000000, STR(__LINE__)); // 0x1.0008p+13 + 0x1.0000000000001p+0 = 0x1.001p+13
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40c00080, 0x00000000), 0x40c00100, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0008p+13 = 0x1.001p+13
+}
+
+void f80(void) {
+ comp64(double_of_bits(0x40d00000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40d00040, 0x00000000, STR(__LINE__)); // 0x1p+14 + 0x1.0000000000001p+0 = 0x1.0004p+14
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40d00000, 0x00000000), 0x40d00040, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+14 = 0x1.0004p+14
+ comp64(double_of_bits(0x40e00180, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40e001a0, 0x00000000, STR(__LINE__)); // 0x1.0018p+15 + 0x1.0000000000001p+0 = 0x1.001ap+15
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40e00180, 0x00000000), 0x40e001a0, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0018p+15 = 0x1.001ap+15
+ comp64(double_of_bits(0x40f00130, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x40f00140, 0x00000000, STR(__LINE__)); // 0x1.0013p+16 + 0x1.0000000000001p+0 = 0x1.0014p+16
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x40f00130, 0x00000000), 0x40f00140, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0013p+16 = 0x1.0014p+16
+ comp64(double_of_bits(0x41000000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x41000008, 0x00000000, STR(__LINE__)); // 0x1p+17 + 0x1.0000000000001p+0 = 0x1.00008p+17
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x41000000, 0x00000000), 0x41000008, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1p+17 = 0x1.00008p+17
+ comp64(double_of_bits(0x41100d60, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x41100d64, 0x00000000, STR(__LINE__)); // 0x1.00d6p+18 + 0x1.0000000000001p+0 = 0x1.00d64p+18
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x41100d60, 0x00000000), 0x41100d64, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.00d6p+18 = 0x1.00d64p+18
+}
+
+void f81(void) {
+ comp64(double_of_bits(0x41200590, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x41200592, 0x00000000, STR(__LINE__)); // 0x1.0059p+19 + 0x1.0000000000001p+0 = 0x1.00592p+19
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x41200590, 0x00000000), 0x41200592, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0059p+19 = 0x1.00592p+19
+ comp64(double_of_bits(0x41300590, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x41300591, 0x00000000, STR(__LINE__)); // 0x1.0059p+20 + 0x1.0000000000001p+0 = 0x1.00591p+20
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x41300590, 0x00000000), 0x41300591, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.0059p+20 = 0x1.00591p+20
+ comp64(double_of_bits(0x413fee9c, 0x00000000) + double_of_bits(0x3ff00000, 0x00000001), 0x413fee9d, 0x00000000, STR(__LINE__)); // 0x1.fee9cp+20 + 0x1.0000000000001p+0 = 0x1.fee9dp+20
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x413fee9c, 0x00000000), 0x413fee9d, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.fee9cp+20 = 0x1.fee9dp+20
+ comp64(double_of_bits(0x41500000, 0x40000000) + double_of_bits(0x3ff00000, 0x00000001), 0x41500000, 0x80000000, STR(__LINE__)); // 0x1.000004p+22 + 0x1.0000000000001p+0 = 0x1.000008p+22
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x41500000, 0x40000000), 0x41500000, 0x80000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.000004p+22 = 0x1.000008p+22
+ comp64(double_of_bits(0x41600000, 0x20000000) + double_of_bits(0x3ff00000, 0x00000001), 0x41600000, 0x40000000, STR(__LINE__)); // 0x1.000002p+23 + 0x1.0000000000001p+0 = 0x1.000004p+23
+ comp64(double_of_bits(0x3ff00000, 0x00000001) + double_of_bits(0x41600000, 0x20000000), 0x41600000, 0x40000000, STR(__LINE__)); // 0x1.0000000000001p+0 + 0x1.000002p+23 = 0x1.000004p+23
+}
+
+void f82(void) {
+ comp64(double_of_bits(0x43400000, 0x00000000) + double_of_bits(0x40100000, 0x00000001), 0x43400000, 0x00000002, STR(__LINE__)); // 0x1p+53 + 0x1.0000000000001p+2 = 0x1.0000000000002p+53
+ comp64(double_of_bits(0x40100000, 0x00000001) + double_of_bits(0x43400000, 0x00000000), 0x43400000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+2 + 0x1p+53 = 0x1.0000000000002p+53
+ comp64(double_of_bits(0x43400000, 0x00000000) + double_of_bits(0x40000000, 0x00000001), 0x43400000, 0x00000001, STR(__LINE__)); // 0x1p+53 + 0x1.0000000000001p+1 = 0x1.0000000000001p+53
+ comp64(double_of_bits(0x40000000, 0x00000001) + double_of_bits(0x43400000, 0x00000000), 0x43400000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1 + 0x1p+53 = 0x1.0000000000001p+53
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xbff00000, 0x00000001), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1p+1 + -0x1.0000000000001p+0 = -0x1.8p+1
+ comp64(double_of_bits(0xbff00000, 0x00000001) + double_of_bits(0xc0000000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 + -0x1p+1 = -0x1.8p+1
+ comp64(double_of_bits(0xc3400000, 0x00000000) + double_of_bits(0xc0100000, 0x00000001), 0xc3400000, 0x00000002, STR(__LINE__)); // -0x1p+53 + -0x1.0000000000001p+2 = -0x1.0000000000002p+53
+ comp64(double_of_bits(0xc0100000, 0x00000001) + double_of_bits(0xc3400000, 0x00000000), 0xc3400000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+2 + -0x1p+53 = -0x1.0000000000002p+53
+ comp64(double_of_bits(0xc3400000, 0x00000000) + double_of_bits(0xc0000000, 0x00000001), 0xc3400000, 0x00000001, STR(__LINE__)); // -0x1p+53 + -0x1.0000000000001p+1 = -0x1.0000000000001p+53
+ comp64(double_of_bits(0xc0000000, 0x00000001) + double_of_bits(0xc3400000, 0x00000000), 0xc3400000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1 + -0x1p+53 = -0x1.0000000000001p+53
+}
+
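+// Exact sums: apart from the final pair, the same-sign additions in f83-f88
+// need no rounding; several carry into the next binade (0x1p+15, 0x1p+53, and
+// +/-0x1p-1022 from the largest subnormals).  The final addition pair in f88
+// adds -0x1p+0 to -0x1.fffffffffffffp+1022; there 0x1p+0 is far below half an
+// ulp (2^969), so the result is the large operand unchanged.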
+void f83(void) {
+ comp64(double_of_bits(0x40dfffc0, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x40e00000, 0x00000000, STR(__LINE__)); // 0x1.fffcp+14 + 0x1p+0 = 0x1p+15
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x40dfffc0, 0x00000000), 0x40e00000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1.fffcp+14 = 0x1p+15
+ comp64(double_of_bits(0xc0dfffc0, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc0e00000, 0x00000000, STR(__LINE__)); // -0x1.fffcp+14 + -0x1p+0 = -0x1p+15
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc0dfffc0, 0x00000000), 0xc0e00000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1.fffcp+14 = -0x1p+15
+ comp64(double_of_bits(0x433fffff, 0xffffffff) + double_of_bits(0x3ff00000, 0x00000000), 0x43400000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+52 + 0x1p+0 = 0x1p+53
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x433fffff, 0xffffffff), 0x43400000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1.fffffffffffffp+52 = 0x1p+53
+ comp64(double_of_bits(0xc33fffff, 0xffffffff) + double_of_bits(0xbff00000, 0x00000000), 0xc3400000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+52 + -0x1p+0 = -0x1p+53
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc33fffff, 0xffffffff), 0xc3400000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1.fffffffffffffp+52 = -0x1p+53
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x402e0000, 0x00000000), 0x40300000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1.ep+3 = 0x1p+4
+ comp64(double_of_bits(0x402e0000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x40300000, 0x00000000, STR(__LINE__)); // 0x1.ep+3 + 0x1p+0 = 0x1p+4
+}
+
+void f84(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc02e0000, 0x00000000), 0xc0300000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1.ep+3 = -0x1p+4
+ comp64(double_of_bits(0xc02e0000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc0300000, 0x00000000, STR(__LINE__)); // -0x1.ep+3 + -0x1p+0 = -0x1p+4
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x433fffff, 0xfffffffb), 0x433fffff, 0xfffffffd, STR(__LINE__)); // 0x1p+1 + 0x1.ffffffffffffbp+52 = 0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0x433fffff, 0xfffffffb) + double_of_bits(0x40000000, 0x00000000), 0x433fffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffbp+52 + 0x1p+1 = 0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xc33fffff, 0xfffffffb), 0xc33fffff, 0xfffffffd, STR(__LINE__)); // -0x1p+1 + -0x1.ffffffffffffbp+52 = -0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0xc33fffff, 0xfffffffb) + double_of_bits(0xc0000000, 0x00000000), 0xc33fffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffbp+52 + -0x1p+1 = -0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0x000fffff, 0xffffffff) + double_of_bits(0x00000000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 + 0x0.0000000000001p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x000fffff, 0xffffffff), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x0.fffffffffffffp-1022 = 0x1p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) + double_of_bits(0x80000000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 + -0x0.0000000000001p-1022 = -0x1p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x800fffff, 0xffffffff), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x0.fffffffffffffp-1022 = -0x1p-1022
+}
+
+void f85(void) {
+ comp64(double_of_bits(0x000e0000, 0x00000000) + double_of_bits(0x00020000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.ep-1022 + 0x0.2p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x00020000, 0x00000000) + double_of_bits(0x000e0000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.2p-1022 + 0x0.ep-1022 = 0x1p-1022
+ comp64(double_of_bits(0x800e0000, 0x00000000) + double_of_bits(0x80020000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.ep-1022 + -0x0.2p-1022 = -0x1p-1022
+ comp64(double_of_bits(0x80020000, 0x00000000) + double_of_bits(0x800e0000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.2p-1022 + -0x0.ep-1022 = -0x1p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x40dfff40, 0x00000000), 0x40dfffc0, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1.fff4p+14 = 0x1.fffcp+14
+ comp64(double_of_bits(0x40dfff40, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0x40dfffc0, 0x00000000, STR(__LINE__)); // 0x1.fff4p+14 + 0x1p+1 = 0x1.fffcp+14
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xc0dfff40, 0x00000000), 0xc0dfffc0, 0x00000000, STR(__LINE__)); // -0x1p+1 + -0x1.fff4p+14 = -0x1.fffcp+14
+ comp64(double_of_bits(0xc0dfff40, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0xc0dfffc0, 0x00000000, STR(__LINE__)); // -0x1.fff4p+14 + -0x1p+1 = -0x1.fffcp+14
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x433fffff, 0xfffffffe), 0x433fffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 + 0x1.ffffffffffffep+52 = 0x1.fffffffffffffp+52
+ comp64(double_of_bits(0x433fffff, 0xfffffffe) + double_of_bits(0x3ff00000, 0x00000000), 0x433fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep+52 + 0x1p+0 = 0x1.fffffffffffffp+52
+}
+
+void f86(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc33fffff, 0xfffffffe), 0xc33fffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 + -0x1.ffffffffffffep+52 = -0x1.fffffffffffffp+52
+ comp64(double_of_bits(0xc33fffff, 0xfffffffe) + double_of_bits(0xbff00000, 0x00000000), 0xc33fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep+52 + -0x1p+0 = -0x1.fffffffffffffp+52
+ comp64(double_of_bits(0x40080000, 0x00000000) + double_of_bits(0x433fffff, 0xfffffffa), 0x433fffff, 0xfffffffd, STR(__LINE__)); // 0x1.8p+1 + 0x1.ffffffffffffap+52 = 0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0x433fffff, 0xfffffffa) + double_of_bits(0x40080000, 0x00000000), 0x433fffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffap+52 + 0x1.8p+1 = 0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0xc0080000, 0x00000000) + double_of_bits(0xc33fffff, 0xfffffffa), 0xc33fffff, 0xfffffffd, STR(__LINE__)); // -0x1.8p+1 + -0x1.ffffffffffffap+52 = -0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0xc33fffff, 0xfffffffa) + double_of_bits(0xc0080000, 0x00000000), 0xc33fffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffap+52 + -0x1.8p+1 = -0x1.ffffffffffffdp+52
+ comp64(double_of_bits(0x3ff00000, 0x00000000) + double_of_bits(0x402c0000, 0x00000000), 0x402e0000, 0x00000000, STR(__LINE__)); // 0x1p+0 + 0x1.cp+3 = 0x1.ep+3
+ comp64(double_of_bits(0x402c0000, 0x00000000) + double_of_bits(0x3ff00000, 0x00000000), 0x402e0000, 0x00000000, STR(__LINE__)); // 0x1.cp+3 + 0x1p+0 = 0x1.ep+3
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xc02c0000, 0x00000000), 0xc02e0000, 0x00000000, STR(__LINE__)); // -0x1p+0 + -0x1.cp+3 = -0x1.ep+3
+ comp64(double_of_bits(0xc02c0000, 0x00000000) + double_of_bits(0xbff00000, 0x00000000), 0xc02e0000, 0x00000000, STR(__LINE__)); // -0x1.cp+3 + -0x1p+0 = -0x1.ep+3
+}
+
+void f87(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) + double_of_bits(0x402a0000, 0x00000000), 0x402e0000, 0x00000000, STR(__LINE__)); // 0x1p+1 + 0x1.ap+3 = 0x1.ep+3
+ comp64(double_of_bits(0x402a0000, 0x00000000) + double_of_bits(0x40000000, 0x00000000), 0x402e0000, 0x00000000, STR(__LINE__)); // 0x1.ap+3 + 0x1p+1 = 0x1.ep+3
+ comp64(double_of_bits(0xc0000000, 0x00000000) + double_of_bits(0xc02a0000, 0x00000000), 0xc02e0000, 0x00000000, STR(__LINE__)); // -0x1p+1 + -0x1.ap+3 = -0x1.ep+3
+ comp64(double_of_bits(0xc02a0000, 0x00000000) + double_of_bits(0xc0000000, 0x00000000), 0xc02e0000, 0x00000000, STR(__LINE__)); // -0x1.ap+3 + -0x1p+1 = -0x1.ep+3
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) + double_of_bits(0x00000000, 0x00000001), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.ffffffffffffep-1022 + 0x0.0000000000001p-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) + double_of_bits(0x000fffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.0000000000001p-1022 + 0x0.ffffffffffffep-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) + double_of_bits(0x80000000, 0x00000001), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0.ffffffffffffep-1022 + -0x0.0000000000001p-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) + double_of_bits(0x800fffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0.0000000000001p-1022 + -0x0.ffffffffffffep-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000c0000, 0x00000000) + double_of_bits(0x00020000, 0x00000000), 0x000e0000, 0x00000000, STR(__LINE__)); // 0x0.cp-1022 + 0x0.2p-1022 = 0x0.ep-1022
+ comp64(double_of_bits(0x00020000, 0x00000000) + double_of_bits(0x000c0000, 0x00000000), 0x000e0000, 0x00000000, STR(__LINE__)); // 0x0.2p-1022 + 0x0.cp-1022 = 0x0.ep-1022
+}
+
+void f88(void) {
+ comp64(double_of_bits(0x800c0000, 0x00000000) + double_of_bits(0x80020000, 0x00000000), 0x800e0000, 0x00000000, STR(__LINE__)); // -0x0.cp-1022 + -0x0.2p-1022 = -0x0.ep-1022
+ comp64(double_of_bits(0x80020000, 0x00000000) + double_of_bits(0x800c0000, 0x00000000), 0x800e0000, 0x00000000, STR(__LINE__)); // -0x0.2p-1022 + -0x0.cp-1022 = -0x0.ep-1022
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) + double_of_bits(0xbff00000, 0x00000000), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1022 + -0x1p+0 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) + double_of_bits(0xffdfffff, 0xffffffff), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 + -0x1.fffffffffffffp+1022 = -0x1.fffffffffffffp+1022
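+ // Division tests start here.  The quotients in this block are exactly
+ // representable -- powers of two, x / +/-0x1p+0, x / (x/2), and ratios like
+ // 0x1.388p+13 / 0x1.4p+3 (10000 / 10) -- so the checks are about the sign
+ // and exponent of the result rather than rounding.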
+ comp64(double_of_bits(0x40e00000, 0x00000000) / double_of_bits(0x40400000, 0x00000000), 0x40900000, 0x00000000, STR(__LINE__)); // 0x1p+15 / 0x1p+5 = 0x1p+10
+ comp64(double_of_bits(0xc0e00000, 0x00000000) / double_of_bits(0xc0400000, 0x00000000), 0x40900000, 0x00000000, STR(__LINE__)); // -0x1p+15 / -0x1p+5 = 0x1p+10
+ comp64(double_of_bits(0x40e00000, 0x00000000) / double_of_bits(0xc0400000, 0x00000000), 0xc0900000, 0x00000000, STR(__LINE__)); // 0x1p+15 / -0x1p+5 = -0x1p+10
+ comp64(double_of_bits(0xc0e00000, 0x00000000) / double_of_bits(0x40400000, 0x00000000), 0xc0900000, 0x00000000, STR(__LINE__)); // -0x1p+15 / 0x1p+5 = -0x1p+10
+ comp64(double_of_bits(0x47700000, 0x00000000) / double_of_bits(0x41300000, 0x00000000), 0x46300000, 0x00000000, STR(__LINE__)); // 0x1p+120 / 0x1p+20 = 0x1p+100
+ comp64(double_of_bits(0xc7700000, 0x00000000) / double_of_bits(0xc1300000, 0x00000000), 0x46300000, 0x00000000, STR(__LINE__)); // -0x1p+120 / -0x1p+20 = 0x1p+100
+}
+
+void f89(void) {
+ comp64(double_of_bits(0xc7700000, 0x00000000) / double_of_bits(0x41300000, 0x00000000), 0xc6300000, 0x00000000, STR(__LINE__)); // -0x1p+120 / 0x1p+20 = -0x1p+100
+ comp64(double_of_bits(0x47700000, 0x00000000) / double_of_bits(0xc1300000, 0x00000000), 0xc6300000, 0x00000000, STR(__LINE__)); // 0x1p+120 / -0x1p+20 = -0x1p+100
+ comp64(double_of_bits(0x43e00000, 0x00000000) / double_of_bits(0x41600000, 0x00000000), 0x42700000, 0x00000000, STR(__LINE__)); // 0x1p+63 / 0x1p+23 = 0x1p+40
+ comp64(double_of_bits(0xc3e00000, 0x00000000) / double_of_bits(0xc1600000, 0x00000000), 0x42700000, 0x00000000, STR(__LINE__)); // -0x1p+63 / -0x1p+23 = 0x1p+40
+ comp64(double_of_bits(0xc3e00000, 0x00000000) / double_of_bits(0x41600000, 0x00000000), 0xc2700000, 0x00000000, STR(__LINE__)); // -0x1p+63 / 0x1p+23 = -0x1p+40
+ comp64(double_of_bits(0x43e00000, 0x00000000) / double_of_bits(0xc1600000, 0x00000000), 0xc2700000, 0x00000000, STR(__LINE__)); // 0x1p+63 / -0x1p+23 = -0x1p+40
+ comp64(double_of_bits(0x42e00000, 0x00000000) / double_of_bits(0x40c00000, 0x00000000), 0x42100000, 0x00000000, STR(__LINE__)); // 0x1p+47 / 0x1p+13 = 0x1p+34
+ comp64(double_of_bits(0xc2e00000, 0x00000000) / double_of_bits(0xc0c00000, 0x00000000), 0x42100000, 0x00000000, STR(__LINE__)); // -0x1p+47 / -0x1p+13 = 0x1p+34
+ comp64(double_of_bits(0xc2e00000, 0x00000000) / double_of_bits(0x40c00000, 0x00000000), 0xc2100000, 0x00000000, STR(__LINE__)); // -0x1p+47 / 0x1p+13 = -0x1p+34
+ comp64(double_of_bits(0x42e00000, 0x00000000) / double_of_bits(0xc0c00000, 0x00000000), 0xc2100000, 0x00000000, STR(__LINE__)); // 0x1p+47 / -0x1p+13 = -0x1p+34
+}
+
+void f90(void) {
+ comp64(double_of_bits(0x40440000, 0x00000000) / double_of_bits(0x40240000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1.4p+5 / 0x1.4p+3 = 0x1p+2
+ comp64(double_of_bits(0xc0440000, 0x00000000) / double_of_bits(0xc0240000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // -0x1.4p+5 / -0x1.4p+3 = 0x1p+2
+ comp64(double_of_bits(0xc0440000, 0x00000000) / double_of_bits(0x40240000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1.4p+5 / 0x1.4p+3 = -0x1p+2
+ comp64(double_of_bits(0x40440000, 0x00000000) / double_of_bits(0xc0240000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // 0x1.4p+5 / -0x1.4p+3 = -0x1p+2
+ comp64(double_of_bits(0x40dffe00, 0x00000000) / double_of_bits(0x40240000, 0x00000000), 0x40a99800, 0x00000000, STR(__LINE__)); // 0x1.ffep+14 / 0x1.4p+3 = 0x1.998p+11
+ comp64(double_of_bits(0x40c38800, 0x00000000) / double_of_bits(0x40240000, 0x00000000), 0x408f4000, 0x00000000, STR(__LINE__)); // 0x1.388p+13 / 0x1.4p+3 = 0x1.f4p+9
+ comp64(double_of_bits(0x40c38800, 0x00000000) / double_of_bits(0x40590000, 0x00000000), 0x40590000, 0x00000000, STR(__LINE__)); // 0x1.388p+13 / 0x1.9p+6 = 0x1.9p+6
+ comp64(double_of_bits(0x40c38800, 0x00000000) / double_of_bits(0x408f4000, 0x00000000), 0x40240000, 0x00000000, STR(__LINE__)); // 0x1.388p+13 / 0x1.f4p+9 = 0x1.4p+3
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 / 0x1p+0 = 0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0xbff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 / -0x1p+0 = 0x1p+0
+}
+
+void f91(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0xbff00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 / -0x1p+0 = -0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 / 0x1p+0 = -0x1p+0
+ comp64(double_of_bits(0x40000000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+1 / 0x1p+0 = 0x1p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) / double_of_bits(0xbff00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // -0x1p+1 / -0x1p+0 = 0x1p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+1 / 0x1p+0 = -0x1p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) / double_of_bits(0xbff00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1p+1 / -0x1p+0 = -0x1p+1
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xbff00000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x1p+0 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x1p+0 = 0x0.0000000000001p-1022
+}
+
+void f92(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x1p+0 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0xbff00000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x1p+0 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0x7fd00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / 0x1p+1 = 0x1p+1022
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0xc0000000, 0x00000000), 0xffd00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / -0x1p+1 = -0x1p+1022
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) / double_of_bits(0x40000000, 0x00000000), 0xffcfffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1022 / 0x1p+1 = -0x1.fffffffffffffp+1021
+ comp64(double_of_bits(0x7fdfffff, 0xfffffffd) / double_of_bits(0xc0000000, 0x00000000), 0xffcfffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1022 / -0x1p+1 = -0x1.ffffffffffffdp+1021
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0xc0000000, 0x00000000), 0xffdfffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / -0x1p+1 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x40200000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1p+3 / 0x1p+1 = 0x1p+2
+ comp64(double_of_bits(0xc0200000, 0x00000000) / double_of_bits(0xc0000000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // -0x1p+3 / -0x1p+1 = 0x1p+2
+ comp64(double_of_bits(0xc0200000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1p+3 / 0x1p+1 = -0x1p+2
+}
+
+void f93(void) {
+ comp64(double_of_bits(0x40200000, 0x00000000) / double_of_bits(0xc0000000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // 0x1p+3 / -0x1p+1 = -0x1p+2
+ comp64(double_of_bits(0x00200000, 0x00000000) / double_of_bits(0xc0000000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1p-1021 / -0x1p+1 = -0x1p-1022
+ comp64(double_of_bits(0x00200000, 0x00000003) / double_of_bits(0xc0000000, 0x00000000), 0x80100000, 0x00000003, STR(__LINE__)); // 0x1.0000000000003p-1021 / -0x1p+1 = -0x1.0000000000003p-1022
+ comp64(double_of_bits(0x00200000, 0x00000001) / double_of_bits(0xc0000000, 0x00000000), 0x80100000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1021 / -0x1p+1 = -0x1.0000000000001p-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffe) / double_of_bits(0x40000000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep-1022 / 0x1p+1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0x00040000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / 0x1p+1 = 0x0.4p-1022
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0xc0000000, 0x00000000), 0x00040000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / -0x1p+1 = 0x0.4p-1022
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0x80040000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / 0x1p+1 = -0x0.4p-1022
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0xc0000000, 0x00000000), 0x80040000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / -0x1p+1 = -0x0.4p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) / double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000002p-1022 / 0x1p+1 = 0x0.0000000000001p-1022
+}
+
+void f94(void) {
+ comp64(double_of_bits(0x80000000, 0x00000002) / double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000002p-1022 / -0x1p+1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) / double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000002p-1022 / 0x1p+1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) / double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000002p-1022 / -0x1p+1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0x7fdfffff, 0xffffffff), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / 0x1.fffffffffffffp+1022 = 0x1p+1
+ comp64(double_of_bits(0xffe00000, 0x00000001) / double_of_bits(0x7fd00000, 0x00000001), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 / 0x1.0000000000001p+1022 = -0x1p+1
+ comp64(double_of_bits(0x7fe00000, 0x00000003) / double_of_bits(0xffd00000, 0x00000003), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p+1023 / -0x1.0000000000003p+1022 = -0x1p+1
+ comp64(double_of_bits(0x00200000, 0x00000000) / double_of_bits(0x00100000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 / 0x1p-1022 = 0x1p+1
+ comp64(double_of_bits(0x80200000, 0x00000001) / double_of_bits(0x00100000, 0x00000001), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1021 / 0x1.0000000000001p-1022 = -0x1p+1
+ comp64(double_of_bits(0x00200000, 0x00000001) / double_of_bits(0x00100000, 0x00000001), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1021 / 0x1.0000000000001p-1022 = 0x1p+1
+ comp64(double_of_bits(0x00200000, 0x00000003) / double_of_bits(0x80100000, 0x00000003), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p-1021 / -0x1.0000000000003p-1022 = -0x1p+1
+}
+
+void f95(void) {
+ comp64(double_of_bits(0x80200000, 0x00000005) / double_of_bits(0x00100000, 0x00000005), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000005p-1021 / 0x1.0000000000005p-1022 = -0x1p+1
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x00040000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / 0x0.4p-1022 = 0x1p+1
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x80040000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / -0x0.4p-1022 = 0x1p+1
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x00040000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / 0x0.4p-1022 = -0x1p+1
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x80040000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / -0x0.4p-1022 = -0x1p+1
+ comp64(double_of_bits(0x00000000, 0x00000002) / double_of_bits(0x00000000, 0x00000001), 0x40000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 / 0x0.0000000000001p-1022 = 0x1p+1
+ comp64(double_of_bits(0x80000000, 0x00000002) / double_of_bits(0x80000000, 0x00000001), 0x40000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000002p-1022 / -0x0.0000000000001p-1022 = 0x1p+1
+ comp64(double_of_bits(0x00000000, 0x00000002) / double_of_bits(0x80000000, 0x00000001), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 / -0x0.0000000000001p-1022 = -0x1p+1
+ comp64(double_of_bits(0x80000000, 0x00000002) / double_of_bits(0x00000000, 0x00000001), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000002p-1022 / 0x0.0000000000001p-1022 = -0x1p+1
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0x3fe00000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / 0x1p-1 = 0x1.8p+2
+}
+
+void f96(void) {
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0xbfe00000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / -0x1p-1 = 0x1.8p+2
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0x3fe00000, 0x00000000), 0xc0180000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / 0x1p-1 = -0x1.8p+2
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0xbfe00000, 0x00000000), 0xc0180000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -0x1p-1 = -0x1.8p+2
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x3fe00000, 0x00000000), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1p-1 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0xbfe00000, 0x00000000), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1p-1 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x3fe00000, 0x00000000), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1p-1 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xbfe00000, 0x00000000), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1p-1 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x3fe00000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x1p-1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0xbfe00000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x1p-1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x3fe00000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x1p-1 = -0x0.0000000000002p-1022
+}
+
+void f97(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0xbfe00000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x1p-1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0x40180000, 0x00000000), 0x3fe00000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / 0x1.8p+2 = 0x1p-1
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0xc0180000, 0x00000000), 0x3fe00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / -0x1.8p+2 = 0x1p-1
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0x40180000, 0x00000000), 0xbfe00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / 0x1.8p+2 = -0x1p-1
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0xc0180000, 0x00000000), 0xbfe00000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -0x1.8p+2 = -0x1p-1
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x001fffff, 0xfffffffe), 0x3fe00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1.ffffffffffffep-1022 = 0x1p-1
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x801fffff, 0xfffffffe), 0x3fe00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1.ffffffffffffep-1022 = 0x1p-1
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x001fffff, 0xfffffffe), 0xbfe00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1.ffffffffffffep-1022 = -0x1p-1
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x801fffff, 0xfffffffe), 0xbfe00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1.ffffffffffffep-1022 = -0x1p-1
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x00000000, 0x00000002), 0x3fe00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x0.0000000000002p-1022 = 0x1p-1
+}
+
+void f98(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x00000000, 0x00000002), 0xbfe00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x0.0000000000002p-1022 = -0x1p-1
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x80000000, 0x00000002), 0x3fe00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x0.0000000000002p-1022 = 0x1p-1
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x80000000, 0x00000002), 0xbfe00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x0.0000000000002p-1022 = -0x1p-1
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0x3f600000, 0x00000000), 0x40980000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / 0x1p-9 = 0x1.8p+10
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0xbf600000, 0x00000000), 0x40980000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / -0x1p-9 = 0x1.8p+10
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0x3f600000, 0x00000000), 0xc0980000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / 0x1p-9 = -0x1.8p+10
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0xbf600000, 0x00000000), 0xc0980000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -0x1p-9 = -0x1.8p+10
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x3f600000, 0x00000000), 0x009fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1p-9 = 0x1.ffffffffffffep-1014
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0xbf600000, 0x00000000), 0x009fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1p-9 = 0x1.ffffffffffffep-1014
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x3f600000, 0x00000000), 0x809fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1p-9 = -0x1.ffffffffffffep-1014
+}
+
+void f99(void) {
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xbf600000, 0x00000000), 0x809fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1p-9 = -0x1.ffffffffffffep-1014
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x3fc00000, 0x00000000), 0x00000000, 0x00000008, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x1p-3 = 0x0.0000000000008p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0xbfc00000, 0x00000000), 0x00000000, 0x00000008, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x1p-3 = 0x0.0000000000008p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x3fc00000, 0x00000000), 0x80000000, 0x00000008, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x1p-3 = -0x0.0000000000008p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0xbfc00000, 0x00000000), 0x80000000, 0x00000008, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x1p-3 = -0x0.0000000000008p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0x40980000, 0x00000000), 0x3f600000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / 0x1.8p+10 = 0x1p-9
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0x40980000, 0x00000000), 0xbf600000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / 0x1.8p+10 = -0x1p-9
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0xc0980000, 0x00000000), 0x3f600000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / -0x1.8p+10 = 0x1p-9
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0xc0980000, 0x00000000), 0xbf600000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -0x1.8p+10 = -0x1p-9
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x009fffff, 0xfffffffe), 0x3f600000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1.ffffffffffffep-1014 = 0x1p-9
+}
+
+void f100(void) {
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x009fffff, 0xfffffffe), 0xbf600000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1.ffffffffffffep-1014 = -0x1p-9
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x809fffff, 0xfffffffe), 0x3f600000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1.ffffffffffffep-1014 = 0x1p-9
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x809fffff, 0xfffffffe), 0xbf600000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1.ffffffffffffep-1014 = -0x1p-9
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x00000000, 0x00000008), 0x3fc00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x0.0000000000008p-1022 = 0x1p-3
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x00000000, 0x00000008), 0xbfc00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x0.0000000000008p-1022 = -0x1p-3
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x80000000, 0x00000008), 0x3fc00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x0.0000000000008p-1022 = 0x1p-3
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x80000000, 0x00000008), 0xbfc00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x0.0000000000008p-1022 = -0x1p-3
+ comp64(double_of_bits(0x40220000, 0x00000000) / double_of_bits(0x40080000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1.2p+3 / 0x1.8p+1 = 0x1.8p+1
+ comp64(double_of_bits(0xc0220000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0x40080000, 0x00000000, STR(__LINE__)); // -0x1.2p+3 / -0x1.8p+1 = 0x1.8p+1
+ comp64(double_of_bits(0xc0220000, 0x00000000) / double_of_bits(0x40080000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1.2p+3 / 0x1.8p+1 = -0x1.8p+1
+}
+
+void f101(void) {
+ comp64(double_of_bits(0x40220000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0xc0080000, 0x00000000, STR(__LINE__)); // 0x1.2p+3 / -0x1.8p+1 = -0x1.8p+1
+ comp64(double_of_bits(0x40180000, 0x00000000) / double_of_bits(0x40080000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1.8p+2 / 0x1.8p+1 = 0x1p+1
+ comp64(double_of_bits(0xc0180000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // -0x1.8p+2 / -0x1.8p+1 = 0x1p+1
+ comp64(double_of_bits(0x40180000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1.8p+2 / -0x1.8p+1 = -0x1p+1
+ comp64(double_of_bits(0xc0180000, 0x00000000) / double_of_bits(0x40080000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1.8p+2 / 0x1.8p+1 = -0x1p+1
+ comp64(double_of_bits(0x7fefffff, 0xfffffffd) / double_of_bits(0x40100000, 0x00000000), 0x7fcfffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1023 / 0x1p+2 = 0x1.ffffffffffffdp+1021
+ comp64(double_of_bits(0x7fefffff, 0xfffffffd) / double_of_bits(0xc0100000, 0x00000000), 0xffcfffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1023 / -0x1p+2 = -0x1.ffffffffffffdp+1021
+ comp64(double_of_bits(0xffefffff, 0xfffffffd) / double_of_bits(0x40100000, 0x00000000), 0xffcfffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp+1023 / 0x1p+2 = -0x1.ffffffffffffdp+1021
+ comp64(double_of_bits(0xffefffff, 0xfffffffd) / double_of_bits(0xc0100000, 0x00000000), 0x7fcfffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp+1023 / -0x1p+2 = 0x1.ffffffffffffdp+1021
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x40100000, 0x00000000), 0x00020000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / 0x1p+2 = 0x0.2p-1022
+}
+
+void f102(void) {
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0xc0100000, 0x00000000), 0x00020000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / -0x1p+2 = 0x0.2p-1022
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x40100000, 0x00000000), 0x80020000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / 0x1p+2 = -0x0.2p-1022
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0xc0100000, 0x00000000), 0x80020000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / -0x1p+2 = -0x0.2p-1022
+ comp64(double_of_bits(0x00000000, 0x00000004) / double_of_bits(0x40100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000004p-1022 / 0x1p+2 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000004) / double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000004p-1022 / -0x1p+2 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000004) / double_of_bits(0x40100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000004p-1022 / 0x1p+2 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000004) / double_of_bits(0xc0100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000004p-1022 / -0x1p+2 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0x7fcfffff, 0xffffffff), 0x40100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / 0x1.fffffffffffffp+1021 = 0x1p+2
+ comp64(double_of_bits(0xffefffff, 0xffffffff) / double_of_bits(0x7fcfffff, 0xffffffff), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 / 0x1.fffffffffffffp+1021 = -0x1p+2
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0xffcfffff, 0xffffffff), 0xc0100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / -0x1.fffffffffffffp+1021 = -0x1p+2
+}
+
+void f103(void) {
+ comp64(double_of_bits(0xffefffff, 0xffffffff) / double_of_bits(0xffcfffff, 0xffffffff), 0x40100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 / -0x1.fffffffffffffp+1021 = 0x1p+2
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x00020000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / 0x0.2p-1022 = 0x1p+2
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x80020000, 0x00000000), 0x40100000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / -0x0.2p-1022 = 0x1p+2
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x00020000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x0.8p-1022 / 0x0.2p-1022 = -0x1p+2
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x80020000, 0x00000000), 0xc0100000, 0x00000000, STR(__LINE__)); // 0x0.8p-1022 / -0x0.2p-1022 = -0x1p+2
+ comp64(double_of_bits(0x00000000, 0x00000004) / double_of_bits(0x00000000, 0x00000001), 0x40100000, 0x00000000, STR(__LINE__)); // 0x0.0000000000004p-1022 / 0x0.0000000000001p-1022 = 0x1p+2
+ comp64(double_of_bits(0x80000000, 0x00000004) / double_of_bits(0x80000000, 0x00000001), 0x40100000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 / -0x0.0000000000001p-1022 = 0x1p+2
+ comp64(double_of_bits(0x00000000, 0x00000004) / double_of_bits(0x80000000, 0x00000001), 0xc0100000, 0x00000000, STR(__LINE__)); // 0x0.0000000000004p-1022 / -0x0.0000000000001p-1022 = -0x1p+2
+ comp64(double_of_bits(0x80000000, 0x00000004) / double_of_bits(0x00000000, 0x00000001), 0xc0100000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 / 0x0.0000000000001p-1022 = -0x1p+2
+ comp64(double_of_bits(0x40140000, 0x00000000) / double_of_bits(0x40140000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 / 0x1.4p+2 = 0x1p+0
+}
+
+void f104(void) {
+ comp64(double_of_bits(0xc0140000, 0x00000000) / double_of_bits(0xc0140000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 / -0x1.4p+2 = 0x1p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) / double_of_bits(0xc0140000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 / -0x1.4p+2 = -0x1p+0
+ comp64(double_of_bits(0xc0140000, 0x00000000) / double_of_bits(0x40140000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 / 0x1.4p+2 = -0x1p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0x40080000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / 0x1.8p+1 = 0x1p+0
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / -0x1.8p+1 = 0x1p+0
+ comp64(double_of_bits(0xc0080000, 0x00000000) / double_of_bits(0x40080000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 / 0x1.8p+1 = -0x1p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -0x1.8p+1 = -0x1p+0
+ comp64(double_of_bits(0x401c0000, 0x00000000) / double_of_bits(0x401c0000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 / 0x1.cp+2 = 0x1p+0
+ comp64(double_of_bits(0xc01c0000, 0x00000000) / double_of_bits(0xc01c0000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1.cp+2 / -0x1.cp+2 = 0x1p+0
+ comp64(double_of_bits(0x401c0000, 0x00000000) / double_of_bits(0xc01c0000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 / -0x1.cp+2 = -0x1p+0
+}
+
+void f105(void) {
+ comp64(double_of_bits(0xc01c0000, 0x00000000) / double_of_bits(0x401c0000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.cp+2 / 0x1.cp+2 = -0x1p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x00000000, 0x00000001), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x0.0000000000001p-1022 = 0x1p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x80000000, 0x00000001), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x0.0000000000001p-1022 = 0x1p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x80000000, 0x00000001), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x0.0000000000001p-1022 = -0x1p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x00000000, 0x00000001), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x0.0000000000001p-1022 = -0x1p+0
+ comp64(double_of_bits(0x00000000, 0x00000009) / double_of_bits(0x40220000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000009p-1022 / 0x1.2p+3 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000009) / double_of_bits(0xc0220000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000009p-1022 / -0x1.2p+3 = -0x0.0000000000001p-1022
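+ // 0/0 is an IEEE 754 invalid operation, so the quotient is a quiet NaN. The
+ // expected words below also fix the NaN's sign bit, which the standard itself
+ // leaves unspecified, so these checks are stricter than IEEE 754 requires.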
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x0p+0 = nan
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x0p+0 = -nan
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x0p+0 = -nan
+}
+
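+// The next group finishes the 0/0 quadrant, then checks that a zero dividend over a
+// nonzero finite divisor yields a zero whose sign is the exclusive or of the
+// operand signs.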
+void f106(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x0p+0 = nan
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x00000000, 0x00000003), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x0.0000000000003p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x0.0000000000002p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x80000000, 0x00000004), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x0.0000000000004p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x000fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x000fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x0.fffffffffffffp-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x800fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x0.fffffffffffffp-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x800fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x0p+0 = inf
+}
+
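+// Division of a nonzero finite value by a zero of either sign is IEEE 754's
+// divide-by-zero case: the result is an infinity whose sign is the exclusive or of
+// the operand signs.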
+void f107(void) {
+ comp64(double_of_bits(0x80000000, 0x00000003) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x00000000, 0x00000002) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 / -0x0p+0 = -inf
+ comp64(double_of_bits(0x80000000, 0x00000004) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 / -0x0p+0 = inf
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x0p+0 = inf
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x0p+0 = -inf
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x0p+0 = inf
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x1p+1 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x1.8p+1 = -0x0p+0
+}
+
+void f108(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p+2 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x40140000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.4p+2 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x40180000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x1.8p+2 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0xc01c0000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x1.cp+2 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0xc0200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p+3 = 0x0p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 / 0x0p+0 = inf
+ comp64(double_of_bits(0xc0000000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -0x0p+0 = -inf
+ comp64(double_of_bits(0xc0100000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+2 / -0x0p+0 = inf
+ comp64(double_of_bits(0x40140000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 / 0x0p+0 = inf
+}
+
+void f109(void) {
+ comp64(double_of_bits(0xc0180000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+2 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x401c0000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 / -0x0p+0 = -inf
+ comp64(double_of_bits(0xc0200000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+3 / -0x0p+0 = inf
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x7fe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p+1023 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x7fd00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x1p+1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0xffe00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x1p+1023 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0xffd00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p+1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x7fdfffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.fffffffffffffp+1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x7fcfffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x1.fffffffffffffp+1021 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0xffcfffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x1.fffffffffffffp+1021 = -0x0p+0
+}
+
+void f110(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0xffdfffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1.fffffffffffffp+1022 = 0x0p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / 0x0p+0 = inf
+ comp64(double_of_bits(0xffd00000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1022 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / -0x0p+0 = -inf
+ comp64(double_of_bits(0xffd00000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1022 / -0x0p+0 = inf
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 / 0x0p+0 = inf
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 / -0x0p+0 = -inf
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 / -0x0p+0 = inf
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p-1022 = 0x0p+0
+}
+
+void f111(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x00200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x1p-1021 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x80200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x1p-1021 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x001fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x00100000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / 0x1.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x80100000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -0x1.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p-1022 / 0x0p+0 = inf
+ comp64(double_of_bits(0x80200000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p-1021 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x00200000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p-1021 / -0x0p+0 = -inf
+}
+
+void f112(void) {
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p-1022 / -0x0p+0 = inf
+ comp64(double_of_bits(0x001fffff, 0xffffffff) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 / 0x0p+0 = inf
+ comp64(double_of_bits(0x80100000, 0x00000001) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 / 0x0p+0 = -inf
+ comp64(double_of_bits(0x00100000, 0x00000001) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 / -0x0p+0 = -inf
+ comp64(double_of_bits(0x801fffff, 0xffffffff) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 / -0x0p+0 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x0p+0 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x0p+0 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x0p+0 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x0p+0 = inf
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / inf = 0x0p+0
+}
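+// Infinity divided by infinity is the other invalid quotient and yields a quiet
+// NaN; a NaN operand on either side of the division simply propagates.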
+
+void f113(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / inf = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 / -inf = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 / -inf = 0x0p+0
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x00000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / 0x0p+0 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x80000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / -0x0p+0 = nan
+ comp64(double_of_bits(0x00000000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 / nan = nan
+ comp64(double_of_bits(0x80000000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 / nan = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // inf / inf = nan
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // -inf / inf = -nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // inf / -inf = -nan
+}
+
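+// An infinite dividend over any finite divisor (subnormals included) remains
+// infinite, while a finite dividend over an infinity collapses to a correctly
+// signed zero.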
+void f114(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -inf / -inf = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x00000000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x0.0000000000001p-1022 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x00000000, 0x00000003), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x0.0000000000003p-1022 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x80000000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x0.0000000000002p-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x80000000, 0x00000004), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x0.0000000000004p-1022 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x000fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x0.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x000fffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x0.fffffffffffffp-1022 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x800fffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x0.fffffffffffffp-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x800fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x0.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / inf = 0x0p+0
+}
+
+void f115(void) {
+ comp64(double_of_bits(0x80000000, 0x00000003) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 / inf = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000002) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 / -inf = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000004) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 / -inf = 0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / inf = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / inf = -0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -inf = -0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -inf = 0x0p+0
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x1p+0 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x40000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x1p+1 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0xc0080000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1.8p+1 = -inf
+}
+
+void f116(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0xc0100000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x1p+2 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x40140000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x1.4p+2 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x40180000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x1.8p+2 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0xc01c0000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1.cp+2 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0xc0200000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x1p+3 = inf
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+0 / inf = 0x0p+0
+ comp64(double_of_bits(0xc0000000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+1 / inf = -0x0p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 / -inf = -0x0p+0
+ comp64(double_of_bits(0xc0100000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+2 / -inf = 0x0p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 / inf = 0x0p+0
+}
+
+void f117(void) {
+ comp64(double_of_bits(0xc0180000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.8p+2 / inf = -0x0p+0
+ comp64(double_of_bits(0x401c0000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 / -inf = -0x0p+0
+ comp64(double_of_bits(0xc0200000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+3 / -inf = 0x0p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / inf = 0x0p+0
+ comp64(double_of_bits(0xffd00000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+1022 / inf = -0x0p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / -inf = -0x0p+0
+ comp64(double_of_bits(0xffd00000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+1022 / -inf = 0x0p+0
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 / inf = 0x0p+0
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 / inf = -0x0p+0
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / -inf = -0x0p+0
+}
+
+void f118(void) {
+ comp64(double_of_bits(0xffefffff, 0xffffffff) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 / -inf = 0x0p+0
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x1p+1023 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x7fd00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x1p+1022 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1p+1023 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0xffd00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x1p+1022 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x7fdfffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x1.fffffffffffffp+1022 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0xffcfffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1.fffffffffffffp+1021 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0xffefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x00100000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x1p-1022 = inf
+}
+
+void f119(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x00200000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x1p-1021 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x80200000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1p-1021 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x80100000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x1p-1022 = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x001fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf / 0x1.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x00100000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf / 0x1.0000000000001p-1022 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x80100000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // inf / -0x1.0000000000001p-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x801fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf / -0x1.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 / inf = 0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 / inf = -0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 / -inf = -0x0p+0
+}
+
+void f120(void) {
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 / -inf = 0x0p+0
+ comp64(double_of_bits(0x001fffff, 0xffffffff) / double_of_bits(0x7ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 / inf = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000001) / double_of_bits(0x7ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 / inf = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000001) / double_of_bits(0xfff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 / -inf = -0x0p+0
+ comp64(double_of_bits(0x801fffff, 0xffffffff) / double_of_bits(0xfff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 / -inf = 0x0p+0
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x7ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / inf = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0xfff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / -inf = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // inf / nan = nan
+ comp64(double_of_bits(0xfff00000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -inf / nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / nan = nan
+}
+
+void f121(void) {
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / nan = nan
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x000fffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / 0x0.fffffffffffffp-1022 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x800fffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / -0x0.fffffffffffffp-1022 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x00000000, 0x00000001), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / 0x0.0000000000001p-1022 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x80000000, 0x00000001), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / -0x0.0000000000001p-1022 = nan
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / nan = nan
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / 0x1p+0 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0xbff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / -0x1p+0 = nan
+}
+
+void f122(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x1p+0 / nan = nan
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x1p+0 / nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0x7fefffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / 0x1.fffffffffffffp+1023 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) / double_of_bits(0xffefffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan / -0x1.fffffffffffffp+1023 = nan
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / nan = nan
+ comp64(double_of_bits(0xffefffff, 0xffffffff) / double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 / nan = nan
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0x3fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / 0x1p-1 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) / double_of_bits(0xbfe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 / -0x1p-1 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0xbfe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / -0x1p-1 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) / double_of_bits(0x3fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 / 0x1p-1 = -inf
+}
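+// Overflow and total underflow: quotients whose exact value is far beyond the
+// largest finite double round to infinity, while quotients below half of the least
+// subnormal (0x0.0000000000001p-1022) round to a signed zero.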
+
+void f123(void) {
+ comp64(double_of_bits(0x7f600000, 0x00000000) / double_of_bits(0x00a00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1015 / 0x1p-1013 = inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0x00000000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / 0x0.0000000000001p-1022 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) / double_of_bits(0x000fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 / 0x0.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 / 0x1.fffffffffffffp-1 = inf
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0x43500000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 / 0x1p+54 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0xc3500000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 / -0x1p+54 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0xc3500000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 / -0x1p+54 = -0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0x43500000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 / 0x1p+54 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x40100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x1p+2 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x1p+2 = 0x0p+0
+}
+
+void f124(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0xc0100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x1p+2 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x40100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x1p+2 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x7fefffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x1.fffffffffffffp+1023 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0xffefffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x1.fffffffffffffp+1023 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0xffefffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x1.fffffffffffffp+1023 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x7fefffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x1.fffffffffffffp+1023 = -0x0p+0
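+ // The four quotients below land within half an ulp of the least subnormal, so
+ // round-to-nearest delivers 0x0.0000000000001p-1022 instead of underflowing to zero.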
+ comp64(double_of_bits(0x001fffff, 0xffffffff) / double_of_bits(0x43400000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 / 0x1p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) / double_of_bits(0xc3400000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 / -0x1p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) / double_of_bits(0x43400000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 / 0x1p+53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) / double_of_bits(0xc3400000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 / -0x1p+53 = -0x0.0000000000001p-1022
+}
+
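+// The quotients in f125 and f126 likewise fall within half an ulp of the least
+// subnormal and therefore round up to 0x0.0000000000001p-1022 rather than to zero.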
+void f125(void) {
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x43300000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1p+52 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0xc3300000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1p+52 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x43300000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1p+52 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xc3300000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1p+52 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00180000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.8p-1022 / 0x1p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80180000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.8p-1022 / -0x1p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80180000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.8p-1022 / 0x1p+53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00180000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.8p-1022 / -0x1p+53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) / double_of_bits(0x40100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000003p-1022 / 0x1p+2 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) / double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000003p-1022 / -0x1p+2 = 0x0.0000000000001p-1022
+}
+
+void f126(void) {
+ comp64(double_of_bits(0x80000000, 0x00000003) / double_of_bits(0x40100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000003p-1022 / 0x1p+2 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) / double_of_bits(0xc0100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000003p-1022 / -0x1p+2 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00140000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.4p-1022 / 0x1p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80140000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.4p-1022 / -0x1p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80140000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.4p-1022 / 0x1p+53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00140000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.4p-1022 / -0x1p+53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000005) / double_of_bits(0x40200000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000005p-1022 / 0x1p+3 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000005) / double_of_bits(0xc0200000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000005p-1022 / -0x1p+3 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000005) / double_of_bits(0x40200000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000005p-1022 / 0x1p+3 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000005) / double_of_bits(0xc0200000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000005p-1022 / -0x1p+3 = -0x0.0000000000001p-1022
+}
+
+void f127(void) {
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 / 0x1p+53 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 / -0x1p+53 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 / 0x1p+53 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 / -0x1p+53 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / 0x1p+1 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / -0x1p+1 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) / double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 / 0x1p+1 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) / double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 / -0x1p+1 = -0x0p+0
+ comp64(double_of_bits(0x00280000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x1.8p-1021 / 0x1p+53 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80280000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x1.8p-1021 / -0x1p+53 = 0x0.0000000000002p-1022
+}
+
+void f128(void) {
+ comp64(double_of_bits(0x80280000, 0x00000000) / double_of_bits(0x43400000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x1.8p-1021 / 0x1p+53 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00280000, 0x00000000) / double_of_bits(0xc3400000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x1.8p-1021 / -0x1p+53 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) / double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000003p-1022 / 0x1p+1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) / double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000003p-1022 / -0x1p+1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) / double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000003p-1022 / 0x1p+1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) / double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000003p-1022 / -0x1p+1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffd) / double_of_bits(0x40000000, 0x00000000), 0x000fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffdp-1022 / 0x1p+1 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffd) / double_of_bits(0xc0000000, 0x00000000), 0x000fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffdp-1022 / -0x1p+1 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffd) / double_of_bits(0x40000000, 0x00000000), 0x800fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffdp-1022 / 0x1p+1 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffd) / double_of_bits(0xc0000000, 0x00000000), 0x800fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffdp-1022 / -0x1p+1 = -0x0.ffffffffffffep-1022
+}
+
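+// Most of the quotients below land inside the subnormal range or exactly at
+// 0x1p-1022; several of them are ties halfway between two representable
+// results and must round to the even candidate (e.g. 0x0.8p-1022, 0x1p-1022).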
+void f129(void) {
+ comp64(double_of_bits(0x001e0000, 0x00000003) / double_of_bits(0x40180000, 0x00000000), 0x00050000, 0x00000000, STR(__LINE__)); // 0x1.e000000000003p-1022 / 0x1.8p+2 = 0x0.5p-1022
+ comp64(double_of_bits(0x001e0000, 0x00000003) / double_of_bits(0x43480000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.e000000000003p-1022 / 0x1.8p+53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x40000000, 0x00000000), 0x00080000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1p+1 = 0x0.8p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0xc0000000, 0x00000000), 0x00080000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1p+1 = 0x0.8p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x40000000, 0x00000000), 0x80080000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1p+1 = -0x0.8p-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xc0000000, 0x00000000), 0x80080000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1p+1 = -0x0.8p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000001), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1022 / 0x1.0000000000001p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0xbff00000, 0x00000001), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1022 / -0x1.0000000000001p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000001), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1022 / 0x1.0000000000001p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) / double_of_bits(0xbff00000, 0x00000001), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1022 / -0x1.0000000000001p+0 = -0x0.fffffffffffffp-1022
+}
+
+void f130(void) {
+ comp64(double_of_bits(0x00100000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000002), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.0000000000001p-1022 / 0x1.0000000000002p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x00100000, 0x00000002) / double_of_bits(0x3ff00000, 0x00000006), 0x000fffff, 0xfffffffc, STR(__LINE__)); // 0x1.0000000000002p-1022 / 0x1.0000000000006p+0 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000001), 0x000fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1.0000000000001p+0 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0xbff00000, 0x00000001), 0x000fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / -0x1.0000000000001p+0 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000001), 0x800fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 / 0x1.0000000000001p+0 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0xbff00000, 0x00000001), 0x800fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / -0x1.0000000000001p+0 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) / double_of_bits(0x3fefffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.ffffffffffffep-1022 / 0x1.ffffffffffffep-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffff7) / double_of_bits(0x3fefffff, 0xfffffffe), 0x000fffff, 0xfffffff8, STR(__LINE__)); // 0x0.ffffffffffff7p-1022 / 0x1.ffffffffffffep-1 = 0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff8) / double_of_bits(0x3fefffff, 0xfffffffe), 0x800fffff, 0xfffffff9, STR(__LINE__)); // -0x0.ffffffffffff8p-1022 / 0x1.ffffffffffffep-1 = -0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000002), 0x000fffff, 0xfffffffd, STR(__LINE__)); // 0x0.fffffffffffffp-1022 / 0x1.0000000000002p+0 = 0x0.ffffffffffffdp-1022
+}
+
+void f131(void) {
+ comp64(double_of_bits(0x0017ffff, 0xffffffff) / double_of_bits(0x3ff80000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.7ffffffffffffp-1022 / 0x1.8p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x8017ffff, 0xffffffff) / double_of_bits(0x3ff80000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1.7ffffffffffffp-1022 / 0x1.8p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x7fd00000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1p+1022 = 0x1p-1022
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) / double_of_bits(0x7fd00000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1 / 0x1p+1022 = -0x1p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) / double_of_bits(0x40000000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 / 0x1p+1 = 0x1p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) / double_of_bits(0xc0000000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 / -0x1p+1 = 0x1p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) / double_of_bits(0x40000000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 / 0x1p+1 = -0x1p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) / double_of_bits(0xc0000000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 / -0x1p+1 = -0x1p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000001), 0x3fefffff, 0xfffffffe, STR(__LINE__)); // 0x1p+0 / 0x1.0000000000001p+0 = 0x1.ffffffffffffep-1
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0xbff00000, 0x00000001), 0x3fefffff, 0xfffffffe, STR(__LINE__)); // -0x1p+0 / -0x1.0000000000001p+0 = 0x1.ffffffffffffep-1
+}
+
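+// From here on the quotients exercise the last bits of the significand: the
+// operands differ from 1.0 (or from 0x1p-1022) by only a few units in the
+// last place, and the expected encodings pin down the correctly rounded
+// final bits.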
+void f132(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000001), 0xbfefffff, 0xfffffffe, STR(__LINE__)); // -0x1p+0 / 0x1.0000000000001p+0 = -0x1.ffffffffffffep-1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0xbff00000, 0x00000001), 0xbfefffff, 0xfffffffe, STR(__LINE__)); // 0x1p+0 / -0x1.0000000000001p+0 = -0x1.ffffffffffffep-1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000002), 0x3fefffff, 0xfffffffc, STR(__LINE__)); // 0x1p+0 / 0x1.0000000000002p+0 = 0x1.ffffffffffffcp-1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000003), 0x3fefffff, 0xfffffffa, STR(__LINE__)); // 0x1p+0 / 0x1.0000000000003p+0 = 0x1.ffffffffffffap-1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3ff00000, 0x00000004), 0x3fefffff, 0xfffffff8, STR(__LINE__)); // 0x1p+0 / 0x1.0000000000004p+0 = 0x1.ffffffffffff8p-1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1p+0 / 0x1.ffffffffffffep-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xfffffffc), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1p+0 / 0x1.ffffffffffffcp-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xfffffff8), 0x3ff00000, 0x00000004, STR(__LINE__)); // 0x1p+0 / 0x1.ffffffffffff8p-1 = 0x1.0000000000004p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000002), 0x3fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.0000000000001p+0 / 0x1.0000000000002p+0 = 0x1.ffffffffffffep-1
+ comp64(double_of_bits(0x3ff00000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000003), 0x3fefffff, 0xfffffffc, STR(__LINE__)); // 0x1.0000000000001p+0 / 0x1.0000000000003p+0 = 0x1.ffffffffffffcp-1
+}
+
+void f133(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0x3ff00000, 0x00000003), 0x3fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.0000000000002p+0 / 0x1.0000000000003p+0 = 0x1.ffffffffffffep-1
+ comp64(double_of_bits(0x3ff00000, 0x00000004) / double_of_bits(0x3ff00000, 0x00000007), 0x3fefffff, 0xfffffffa, STR(__LINE__)); // 0x1.0000000000004p+0 / 0x1.0000000000007p+0 = 0x1.ffffffffffffap-1
+ comp64(double_of_bits(0x3ff00000, 0x00000006) / double_of_bits(0x3ff00000, 0x00000008), 0x3fefffff, 0xfffffffc, STR(__LINE__)); // 0x1.0000000000006p+0 / 0x1.0000000000008p+0 = 0x1.ffffffffffffcp-1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.ffffffffffffdp-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3fefffff, 0xfffffffc), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.ffffffffffffcp-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xfffffff9), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.ffffffffffff9p-1 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) / double_of_bits(0x3fefffff, 0xfffffff9), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.ffffffffffffdp-1 / 0x1.ffffffffffff9p-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffb) / double_of_bits(0x3fefffff, 0xfffffff9), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.ffffffffffffbp-1 / 0x1.ffffffffffff9p-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 / 0x1.ffffffffffffep-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p+0 / 0x1.ffffffffffffep-1 = 0x1.0000000000003p+0
+}
+
+void f134(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000003) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000004, STR(__LINE__)); // 0x1.0000000000003p+0 / 0x1.ffffffffffffep-1 = 0x1.0000000000004p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0x3fefffff, 0xfffffffc), 0x3ff00000, 0x00000004, STR(__LINE__)); // 0x1.0000000000002p+0 / 0x1.ffffffffffffcp-1 = 0x1.0000000000004p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000004) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000005, STR(__LINE__)); // 0x1.0000000000004p+0 / 0x1.ffffffffffffep-1 = 0x1.0000000000005p+0
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000001), 0x3fefffff, 0xfffffffd, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.0000000000001p+0 = 0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3ff00000, 0x00000001), 0x3fefffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.0000000000001p+0 = 0x1.ffffffffffffcp-1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000002), 0x3fefffff, 0xfffffffb, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.0000000000002p+0 = 0x1.ffffffffffffbp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) / double_of_bits(0x3ff00000, 0x00000001), 0x3fefffff, 0xfffffffb, STR(__LINE__)); // 0x1.ffffffffffffdp-1 / 0x1.0000000000001p+0 = 0x1.ffffffffffffbp-1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000003), 0x3fefffff, 0xfffffff9, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.0000000000003p+0 = 0x1.ffffffffffff9p-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3ff00000, 0x00000002), 0x3fefffff, 0xfffffffa, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.0000000000002p+0 = 0x1.ffffffffffffap-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) / double_of_bits(0x3ff00000, 0x00000001), 0x3fefffff, 0xfffffffa, STR(__LINE__)); // 0x1.ffffffffffffcp-1 / 0x1.0000000000001p+0 = 0x1.ffffffffffffap-1
+}
+
+void f135(void) {
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3ff00000, 0x00000004), 0x3fefffff, 0xfffffff7, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.0000000000004p+0 = 0x1.ffffffffffff7p-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) / double_of_bits(0x3ff00000, 0x00000002), 0x3fefffff, 0xfffffff9, STR(__LINE__)); // 0x1.ffffffffffffdp-1 / 0x1.0000000000002p+0 = 0x1.ffffffffffff9p-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3ff00000, 0x00000003), 0x3fefffff, 0xfffffff8, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.0000000000003p+0 = 0x1.ffffffffffff8p-1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) / double_of_bits(0xbff00000, 0x00000001), 0x3fefffff, 0xfffffffa, STR(__LINE__)); // -0x1.ffffffffffffcp-1 / -0x1.0000000000001p+0 = 0x1.ffffffffffffap-1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) / double_of_bits(0x3ff00000, 0x00000001), 0xbfefffff, 0xfffffffa, STR(__LINE__)); // -0x1.ffffffffffffcp-1 / 0x1.0000000000001p+0 = -0x1.ffffffffffffap-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) / double_of_bits(0xbff00000, 0x00000001), 0xbfefffff, 0xfffffffa, STR(__LINE__)); // 0x1.ffffffffffffcp-1 / -0x1.0000000000001p+0 = -0x1.ffffffffffffap-1
+ comp64(double_of_bits(0x3ff7ffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff80000, 0x00000001, STR(__LINE__)); // 0x1.7ffffffffffffp+0 / 0x1.ffffffffffffep-1 = 0x1.8000000000001p+0
+ comp64(double_of_bits(0xbff7ffff, 0xffffffff) / double_of_bits(0xbfefffff, 0xfffffffe), 0x3ff80000, 0x00000001, STR(__LINE__)); // -0x1.7ffffffffffffp+0 / -0x1.ffffffffffffep-1 = 0x1.8000000000001p+0
+ comp64(double_of_bits(0xbff7ffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xfffffffe), 0xbff80000, 0x00000001, STR(__LINE__)); // -0x1.7ffffffffffffp+0 / 0x1.ffffffffffffep-1 = -0x1.8000000000001p+0
+ comp64(double_of_bits(0x3ff7ffff, 0xffffffff) / double_of_bits(0xbfefffff, 0xfffffffe), 0xbff80000, 0x00000001, STR(__LINE__)); // 0x1.7ffffffffffffp+0 / -0x1.ffffffffffffep-1 = -0x1.8000000000001p+0
+}
+
+void f136(void) {
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x00100000, 0x00000001), 0x3fdfffff, 0xfffffffe, STR(__LINE__)); // 0x0.8p-1022 / 0x1.0000000000001p-1022 = 0x1.ffffffffffffep-2
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x80100000, 0x00000001), 0x3fdfffff, 0xfffffffe, STR(__LINE__)); // -0x0.8p-1022 / -0x1.0000000000001p-1022 = 0x1.ffffffffffffep-2
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x00100000, 0x00000001), 0xbfdfffff, 0xfffffffe, STR(__LINE__)); // -0x0.8p-1022 / 0x1.0000000000001p-1022 = -0x1.ffffffffffffep-2
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x80100000, 0x00000001), 0xbfdfffff, 0xfffffffe, STR(__LINE__)); // 0x0.8p-1022 / -0x1.0000000000001p-1022 = -0x1.ffffffffffffep-2
+ comp64(double_of_bits(0x40080000, 0x00000002) / double_of_bits(0x3ff00000, 0x00000001), 0x40080000, 0x00000000, STR(__LINE__)); // 0x1.8000000000002p+1 / 0x1.0000000000001p+0 = 0x1.8p+1
+ comp64(double_of_bits(0xc0080000, 0x00000002) / double_of_bits(0xbff00000, 0x00000001), 0x40080000, 0x00000000, STR(__LINE__)); // -0x1.8000000000002p+1 / -0x1.0000000000001p+0 = 0x1.8p+1
+ comp64(double_of_bits(0x40080000, 0x00000002) / double_of_bits(0xbff00000, 0x00000001), 0xc0080000, 0x00000000, STR(__LINE__)); // 0x1.8000000000002p+1 / -0x1.0000000000001p+0 = -0x1.8p+1
+ comp64(double_of_bits(0xc0080000, 0x00000002) / double_of_bits(0x3ff00000, 0x00000001), 0xc0080000, 0x00000000, STR(__LINE__)); // -0x1.8000000000002p+1 / 0x1.0000000000001p+0 = -0x1.8p+1
+ comp64(double_of_bits(0x000c0000, 0x00000001) / double_of_bits(0x00100000, 0x00000001), 0x3fe80000, 0x00000000, STR(__LINE__)); // 0x0.c000000000001p-1022 / 0x1.0000000000001p-1022 = 0x1.8p-1
+ comp64(double_of_bits(0x800c0000, 0x00000001) / double_of_bits(0x80100000, 0x00000001), 0x3fe80000, 0x00000000, STR(__LINE__)); // -0x0.c000000000001p-1022 / -0x1.0000000000001p-1022 = 0x1.8p-1
+}
+
+void f137(void) {
+ comp64(double_of_bits(0x000c0000, 0x00000001) / double_of_bits(0x80100000, 0x00000001), 0xbfe80000, 0x00000000, STR(__LINE__)); // 0x0.c000000000001p-1022 / -0x1.0000000000001p-1022 = -0x1.8p-1
+ comp64(double_of_bits(0x800c0000, 0x00000001) / double_of_bits(0x00100000, 0x00000001), 0xbfe80000, 0x00000000, STR(__LINE__)); // -0x0.c000000000001p-1022 / 0x1.0000000000001p-1022 = -0x1.8p-1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1p+0 / 0x1.ffffffffffffdp-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xfffffffb), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1p+0 / 0x1.ffffffffffffbp-1 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xfffffff7), 0x3ff00000, 0x00000005, STR(__LINE__)); // 0x1p+0 / 0x1.ffffffffffff7p-1 = 0x1.0000000000005p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xffffffff), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1p+0 / 0x1.fffffffffffffp-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0xbfefffff, 0xffffffff), 0x3ff00000, 0x00000001, STR(__LINE__)); // -0x1p+0 / -0x1.fffffffffffffp-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) / double_of_bits(0x3fefffff, 0xffffffff), 0xbff00000, 0x00000001, STR(__LINE__)); // -0x1p+0 / 0x1.fffffffffffffp-1 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) / double_of_bits(0xbfefffff, 0xffffffff), 0xbff00000, 0x00000001, STR(__LINE__)); // 0x1p+0 / -0x1.fffffffffffffp-1 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.ffffffffffffep-1 = 0x1.0000000000001p+0
+}
+
+void f138(void) {
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.ffffffffffffdp-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) / double_of_bits(0x3fefffff, 0xfffffffc), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.fffffffffffffp-1 / 0x1.ffffffffffffcp-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) / double_of_bits(0x3fefffff, 0xfffffffc), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.ffffffffffffdp-1 / 0x1.ffffffffffffcp-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3fefffff, 0xfffffff9), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.ffffffffffff9p-1 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) / double_of_bits(0x3fefffff, 0xfffffff9), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.ffffffffffffcp-1 / 0x1.ffffffffffff9p-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffa) / double_of_bits(0x3fefffff, 0xfffffff9), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.ffffffffffffap-1 / 0x1.ffffffffffff9p-1 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) / double_of_bits(0x3fefffff, 0xffffffff), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 / 0x1.fffffffffffffp-1 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0x3fefffff, 0xffffffff), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p+0 / 0x1.fffffffffffffp-1 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 / 0x1.ffffffffffffdp-1 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000003) / double_of_bits(0x3fefffff, 0xffffffff), 0x3ff00000, 0x00000004, STR(__LINE__)); // 0x1.0000000000003p+0 / 0x1.fffffffffffffp-1 = 0x1.0000000000004p+0
+}
+
+void f139(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3ff00000, 0x00000004, STR(__LINE__)); // 0x1.0000000000002p+0 / 0x1.ffffffffffffdp-1 = 0x1.0000000000004p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000003) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3ff00000, 0x00000005, STR(__LINE__)); // 0x1.0000000000003p+0 / 0x1.ffffffffffffdp-1 = 0x1.0000000000005p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) / double_of_bits(0x3fefffff, 0xfffffffb), 0x3ff00000, 0x00000004, STR(__LINE__)); // 0x1.0000000000001p+0 / 0x1.ffffffffffffbp-1 = 0x1.0000000000004p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000005) / double_of_bits(0x3fefffff, 0xffffffff), 0x3ff00000, 0x00000006, STR(__LINE__)); // 0x1.0000000000005p+0 / 0x1.fffffffffffffp-1 = 0x1.0000000000006p+0
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x001fffff, 0xffffffff), 0x3fd00000, 0x00000001, STR(__LINE__)); // 0x0.8p-1022 / 0x1.fffffffffffffp-1022 = 0x1.0000000000001p-2
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x801fffff, 0xffffffff), 0x3fd00000, 0x00000001, STR(__LINE__)); // -0x0.8p-1022 / -0x1.fffffffffffffp-1022 = 0x1.0000000000001p-2
+ comp64(double_of_bits(0x80080000, 0x00000000) / double_of_bits(0x001fffff, 0xffffffff), 0xbfd00000, 0x00000001, STR(__LINE__)); // -0x0.8p-1022 / 0x1.fffffffffffffp-1022 = -0x1.0000000000001p-2
+ comp64(double_of_bits(0x00080000, 0x00000000) / double_of_bits(0x801fffff, 0xffffffff), 0xbfd00000, 0x00000001, STR(__LINE__)); // 0x0.8p-1022 / -0x1.fffffffffffffp-1022 = -0x1.0000000000001p-2
+ comp64(double_of_bits(0x3ff80000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000001), 0x3ff80000, 0x00000000, STR(__LINE__)); // 0x1.8000000000001p+0 / 0x1.0000000000001p+0 = 0x1.8p+0
+ comp64(double_of_bits(0xbff80000, 0x00000001) / double_of_bits(0xbff00000, 0x00000001), 0x3ff80000, 0x00000000, STR(__LINE__)); // -0x1.8000000000001p+0 / -0x1.0000000000001p+0 = 0x1.8p+0
+}
+
+void f140(void) {
+ comp64(double_of_bits(0xbff80000, 0x00000001) / double_of_bits(0x3ff00000, 0x00000001), 0xbff80000, 0x00000000, STR(__LINE__)); // -0x1.8000000000001p+0 / 0x1.0000000000001p+0 = -0x1.8p+0
+ comp64(double_of_bits(0x3ff80000, 0x00000001) / double_of_bits(0xbff00000, 0x00000001), 0xbff80000, 0x00000000, STR(__LINE__)); // 0x1.8000000000001p+0 / -0x1.0000000000001p+0 = -0x1.8p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000002p+0 / 0x1.0000000000001p+0 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0xbff00000, 0x00000002) / double_of_bits(0xbff00000, 0x00000001), 0x3ff00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000002p+0 / -0x1.0000000000001p+0 = 0x1.0000000000001p+0
+ comp64(double_of_bits(0xbff00000, 0x00000002) / double_of_bits(0x3ff00000, 0x00000001), 0xbff00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000002p+0 / 0x1.0000000000001p+0 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) / double_of_bits(0xbff00000, 0x00000001), 0xbff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000002p+0 / -0x1.0000000000001p+0 = -0x1.0000000000001p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000003) / double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.0000000000003p+0 / 0x1.0000000000001p+0 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000004) / double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000004p+0 / 0x1.0000000000001p+0 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000007) / double_of_bits(0x3ff00000, 0x00000002), 0x3ff00000, 0x00000005, STR(__LINE__)); // 0x1.0000000000007p+0 / 0x1.0000000000002p+0 = 0x1.0000000000005p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000009) / double_of_bits(0x3ff00000, 0x00000008), 0x3ff00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000009p+0 / 0x1.0000000000008p+0 = 0x1.0000000000001p+0
+}
+
+void f141(void) {
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) / double_of_bits(0x3fefffff, 0xffffffff), 0x3fefffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep-1 / 0x1.fffffffffffffp-1 = 0x1.fffffffffffffp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) / double_of_bits(0x3fefffff, 0xffffffff), 0x3fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffdp-1 / 0x1.fffffffffffffp-1 = 0x1.ffffffffffffep-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3fefffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffdp-1 / 0x1.ffffffffffffep-1 = 0x1.fffffffffffffp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) / double_of_bits(0x3fefffff, 0xffffffff), 0x3fefffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffcp-1 / 0x1.fffffffffffffp-1 = 0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3fefffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffcp-1 / 0x1.ffffffffffffep-1 = 0x1.ffffffffffffep-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3fefffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffcp-1 / 0x1.ffffffffffffdp-1 = 0x1.fffffffffffffp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffff8) / double_of_bits(0x3fefffff, 0xfffffffd), 0x3fefffff, 0xfffffffb, STR(__LINE__)); // 0x1.ffffffffffff8p-1 / 0x1.ffffffffffffdp-1 = 0x1.ffffffffffffbp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffff7) / double_of_bits(0x3fefffff, 0xfffffffe), 0x3fefffff, 0xfffffff9, STR(__LINE__)); // 0x1.ffffffffffff7p-1 / 0x1.ffffffffffffep-1 = 0x1.ffffffffffff9p-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffff8) / double_of_bits(0x3fefffff, 0xfffffffc), 0x3fefffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffff8p-1 / 0x1.ffffffffffffcp-1 = 0x1.ffffffffffffcp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffff7) / double_of_bits(0x3fefffff, 0xfffffffb), 0x3fefffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffff7p-1 / 0x1.ffffffffffffbp-1 = 0x1.ffffffffffffcp-1
+}
+
+void f142(void) {
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) / double_of_bits(0x001fffff, 0xffffffff), 0x3fdfffff, 0xfffffffd, STR(__LINE__)); // 0x0.ffffffffffffep-1022 / 0x1.fffffffffffffp-1022 = 0x1.ffffffffffffdp-2
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) / double_of_bits(0x801fffff, 0xffffffff), 0x3fdfffff, 0xfffffffd, STR(__LINE__)); // -0x0.ffffffffffffep-1022 / -0x1.fffffffffffffp-1022 = 0x1.ffffffffffffdp-2
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) / double_of_bits(0x001fffff, 0xffffffff), 0xbfdfffff, 0xfffffffd, STR(__LINE__)); // -0x0.ffffffffffffep-1022 / 0x1.fffffffffffffp-1022 = -0x1.ffffffffffffdp-2
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) / double_of_bits(0x801fffff, 0xffffffff), 0xbfdfffff, 0xfffffffd, STR(__LINE__)); // 0x0.ffffffffffffep-1022 / -0x1.fffffffffffffp-1022 = -0x1.ffffffffffffdp-2
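+ // The cases below switch from division to multiplication, beginning with
+ // exact small-integer products that only exercise the sign rules.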
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 * 0x1p+0 = 0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 * -0x1p+0 = 0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1p+0 * 0x1p+0 = -0x1p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 * -0x1p+0 = -0x1p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+0 * 0x1p+1 = 0x1p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // 0x1p+1 * 0x1p+0 = 0x1p+1
+}
+
+void f143(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // -0x1p+0 * -0x1p+1 = 0x1p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0x40000000, 0x00000000, STR(__LINE__)); // -0x1p+1 * -0x1p+0 = 0x1p+1
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+0 * 0x1p+1 = -0x1p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1p+1 * -0x1p+0 = -0x1p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // 0x1p+0 * -0x1p+1 = -0x1p+1
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0xc0000000, 0x00000000, STR(__LINE__)); // -0x1p+1 * 0x1p+0 = -0x1p+1
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x40080000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // 0x1p+1 * 0x1.8p+1 = 0x1.8p+2
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 * 0x1p+1 = 0x1.8p+2
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // -0x1p+1 * -0x1.8p+1 = 0x1.8p+2
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0x40180000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * -0x1p+1 = 0x1.8p+2
+}
+
+void f144(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x40080000, 0x00000000), 0xc0180000, 0x00000000, STR(__LINE__)); // -0x1p+1 * 0x1.8p+1 = -0x1.8p+2
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0xc0180000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 * -0x1p+1 = -0x1.8p+2
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0xc0180000, 0x00000000, STR(__LINE__)); // 0x1p+1 * -0x1.8p+1 = -0x1.8p+2
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0xc0180000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * 0x1p+1 = -0x1.8p+2
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x40080000, 0x00000000), 0x40220000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 * 0x1.8p+1 = 0x1.2p+3
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0x40220000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * -0x1.8p+1 = 0x1.2p+3
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x40080000, 0x00000000), 0xc0220000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * 0x1.8p+1 = -0x1.2p+3
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0xc0220000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 * -0x1.8p+1 = -0x1.2p+3
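+ // Multiplying by +1.0 or -1.0 must return the other operand exactly, down to
+ // the last bit of a subnormal; each case below also appears with the operands
+ // swapped to check that the order does not matter.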
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p+0 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p+0 * 0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+}
+
+void f145(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p+0 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1p+0 * -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p+0 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1p+0 * -0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p+0 * 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbff00000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p+0 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000002p-1022 * 0x1p+0 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x00000000, 0x00000002, STR(__LINE__)); // 0x1p+0 * 0x0.0000000000002p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000002p-1022 * -0x1p+0 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x00000000, 0x00000002, STR(__LINE__)); // -0x1p+0 * -0x0.0000000000002p-1022 = 0x0.0000000000002p-1022
+}
+
+void f146(void) {
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x3ff00000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000002p-1022 * 0x1p+0 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000002, STR(__LINE__)); // 0x1p+0 * -0x0.0000000000002p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x80000000, 0x00000002, STR(__LINE__)); // -0x1p+0 * 0x0.0000000000002p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0xbff00000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000002p-1022 * -0x1p+0 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000004) * double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000004p-1022 * 0x1p+0 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000004), 0x00000000, 0x00000004, STR(__LINE__)); // 0x1p+0 * 0x0.0000000000004p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000004) * double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000004p-1022 * -0x1p+0 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000004), 0x00000000, 0x00000004, STR(__LINE__)); // -0x1p+0 * -0x0.0000000000004p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000004) * double_of_bits(0x3ff00000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000004p-1022 * 0x1p+0 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000004), 0x80000000, 0x00000004, STR(__LINE__)); // 0x1p+0 * -0x0.0000000000004p-1022 = -0x0.0000000000004p-1022
+}
+
+void f147(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000004), 0x80000000, 0x00000004, STR(__LINE__)); // -0x1p+0 * 0x0.0000000000004p-1022 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x00000000, 0x00000004) * double_of_bits(0xbff00000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000004p-1022 * -0x1p+0 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000000), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep-1022 * 0x1p+0 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffffe), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x1p+0 * 0x1.ffffffffffffep-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000000), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep-1022 * -0x1p+0 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffffe), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x1p+0 * -0x1.ffffffffffffep-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000000), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep-1022 * 0x1p+0 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffffe), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x1p+0 * -0x1.ffffffffffffep-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffffe), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x1p+0 * 0x1.ffffffffffffep-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000000), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep-1022 * -0x1p+0 = -0x1.ffffffffffffep-1022
+}
+
+void f148(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000009), 0x80000000, 0x00000009, STR(__LINE__)); // 0x1p+0 * -0x0.0000000000009p-1022 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x80000000, 0x00000009) * double_of_bits(0x3ff00000, 0x00000000), 0x80000000, 0x00000009, STR(__LINE__)); // -0x0.0000000000009p-1022 * 0x1p+0 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000009), 0x80000000, 0x00000009, STR(__LINE__)); // -0x1p+0 * 0x0.0000000000009p-1022 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x00000000, 0x00000009) * double_of_bits(0xbff00000, 0x00000000), 0x80000000, 0x00000009, STR(__LINE__)); // 0x0.0000000000009p-1022 * -0x1p+0 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000009), 0x00000000, 0x00000009, STR(__LINE__)); // -0x1p+0 * -0x0.0000000000009p-1022 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x80000000, 0x00000009) * double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000009, STR(__LINE__)); // -0x0.0000000000009p-1022 * -0x1p+0 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x00000000, 0x00000009) * double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000009, STR(__LINE__)); // 0x0.0000000000009p-1022 * 0x1p+0 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000009), 0x00000000, 0x00000009, STR(__LINE__)); // 0x1p+0 * 0x0.0000000000009p-1022 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 * -0x0.fffffffffffffp-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * 0x1p+0 = -0x0.fffffffffffffp-1022
+}
+
+void f149(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 * 0x0.fffffffffffffp-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * -0x1p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1p+0 * -0x0.fffffffffffffp-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * -0x1p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * 0x1p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1p+0 * 0x0.fffffffffffffp-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x80100000, 0x00000001, STR(__LINE__)); // 0x1p+0 * -0x1.0000000000001p-1022 = -0x1.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000000), 0x80100000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1p+0 = -0x1.0000000000001p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x80100000, 0x00000001, STR(__LINE__)); // -0x1p+0 * 0x1.0000000000001p-1022 = -0x1.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xbff00000, 0x00000000), 0x80100000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1p+0 = -0x1.0000000000001p-1022
+}
+
+void f150(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x00100000, 0x00000001, STR(__LINE__)); // -0x1p+0 * -0x1.0000000000001p-1022 = 0x1.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xbff00000, 0x00000000), 0x00100000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1p+0 = 0x1.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000000), 0x00100000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1p+0 = 0x1.0000000000001p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x00100000, 0x00000001, STR(__LINE__)); // 0x1p+0 * 0x1.0000000000001p-1022 = 0x1.0000000000001p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00200000, 0x00000003), 0x00200000, 0x00000003, STR(__LINE__)); // 0x1p+0 * 0x1.0000000000003p-1021 = 0x1.0000000000003p-1021
+ comp64(double_of_bits(0x00200000, 0x00000003) * double_of_bits(0x3ff00000, 0x00000000), 0x00200000, 0x00000003, STR(__LINE__)); // 0x1.0000000000003p-1021 * 0x1p+0 = 0x1.0000000000003p-1021
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00100000, 0x00000009), 0x80100000, 0x00000009, STR(__LINE__)); // -0x1p+0 * 0x1.0000000000009p-1022 = -0x1.0000000000009p-1022
+ comp64(double_of_bits(0x00100000, 0x00000009) * double_of_bits(0xbff00000, 0x00000000), 0x80100000, 0x00000009, STR(__LINE__)); // 0x1.0000000000009p-1022 * -0x1p+0 = -0x1.0000000000009p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x000fffff, 0xfffffffd), 0x000fffff, 0xfffffffd, STR(__LINE__)); // 0x1p+0 * 0x0.ffffffffffffdp-1022 = 0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffd) * double_of_bits(0x3ff00000, 0x00000000), 0x000fffff, 0xfffffffd, STR(__LINE__)); // 0x0.ffffffffffffdp-1022 * 0x1p+0 = 0x0.ffffffffffffdp-1022
+}
+
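+// Multiplications by +/-0x1p+1: as long as the doubled value stays finite the
+// product is exact and only the exponent (and sign) changes, whether the other
+// operand sits near the overflow threshold or near the smallest normal
+// 0x1p-1022.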
+void f151(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fdfffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1p+1 * 0x1.fffffffffffffp+1022 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * 0x1p+1 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xffdfffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // -0x1p+1 * -0x1.fffffffffffffp+1022 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1022 * -0x1p+1 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * -0x1p+1 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7fdfffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1p+1 * 0x1.fffffffffffffp+1022 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1022 * 0x1p+1 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xffdfffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // 0x1p+1 * -0x1.fffffffffffffp+1022 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000003), 0x7fe00000, 0x00000003, STR(__LINE__)); // 0x1p+1 * 0x1.0000000000003p+1022 = 0x1.0000000000003p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000003) * double_of_bits(0x40000000, 0x00000000), 0x7fe00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000003p+1022 * 0x1p+1 = 0x1.0000000000003p+1023
+}
+
+void f152(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xffd00000, 0x00000003), 0x7fe00000, 0x00000003, STR(__LINE__)); // -0x1p+1 * -0x1.0000000000003p+1022 = 0x1.0000000000003p+1023
+ comp64(double_of_bits(0xffd00000, 0x00000003) * double_of_bits(0xc0000000, 0x00000000), 0x7fe00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000003p+1022 * -0x1p+1 = 0x1.0000000000003p+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xffd00000, 0x00000003), 0xffe00000, 0x00000003, STR(__LINE__)); // 0x1p+1 * -0x1.0000000000003p+1022 = -0x1.0000000000003p+1023
+ comp64(double_of_bits(0xffd00000, 0x00000003) * double_of_bits(0x40000000, 0x00000000), 0xffe00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000003p+1022 * 0x1p+1 = -0x1.0000000000003p+1023
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000003), 0xffe00000, 0x00000003, STR(__LINE__)); // -0x1p+1 * 0x1.0000000000003p+1022 = -0x1.0000000000003p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000003) * double_of_bits(0xc0000000, 0x00000000), 0xffe00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000003p+1022 * -0x1p+1 = -0x1.0000000000003p+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000001), 0x7fe00000, 0x00000001, STR(__LINE__)); // 0x1p+1 * 0x1.0000000000001p+1022 = 0x1.0000000000001p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000001) * double_of_bits(0x40000000, 0x00000000), 0x7fe00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1022 * 0x1p+1 = 0x1.0000000000001p+1023
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xffd00000, 0x00000001), 0x7fe00000, 0x00000001, STR(__LINE__)); // -0x1p+1 * -0x1.0000000000001p+1022 = 0x1.0000000000001p+1023
+ comp64(double_of_bits(0xffd00000, 0x00000001) * double_of_bits(0xc0000000, 0x00000000), 0x7fe00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1022 * -0x1p+1 = 0x1.0000000000001p+1023
+}
+
+void f153(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000001), 0xffe00000, 0x00000001, STR(__LINE__)); // -0x1p+1 * 0x1.0000000000001p+1022 = -0x1.0000000000001p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000001) * double_of_bits(0xc0000000, 0x00000000), 0xffe00000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1022 * -0x1p+1 = -0x1.0000000000001p+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xffd00000, 0x00000001), 0xffe00000, 0x00000001, STR(__LINE__)); // 0x1p+1 * -0x1.0000000000001p+1022 = -0x1.0000000000001p+1023
+ comp64(double_of_bits(0xffd00000, 0x00000001) * double_of_bits(0x40000000, 0x00000000), 0xffe00000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1022 * 0x1p+1 = -0x1.0000000000001p+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1 * 0x1p+1022 = 0x1p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1p+1022 * 0x1p+1 = 0x1p+1023
+ comp64(double_of_bits(0x7fd00000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x1p+1022 * -0x1p+1 = -0x1p+1023
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000000), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1p+1 * 0x1p+1022 = -0x1p+1023
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fcfffff, 0xffffffff), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x1p+1 * 0x1.fffffffffffffp+1021 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000000), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * 0x1p+1 = 0x1.fffffffffffffp+1022
+}
+
+void f154(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // -0x1p+1 * -0x1.fffffffffffffp+1021 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000000), 0x7fdfffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * -0x1p+1 = 0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7fcfffff, 0xffffffff), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x1p+1 * 0x1.fffffffffffffp+1021 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000000), 0xffdfffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * -0x1p+1 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0xffdfffff, 0xffffffff, STR(__LINE__)); // 0x1p+1 * -0x1.fffffffffffffp+1021 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000000), 0xffdfffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * 0x1p+1 = -0x1.fffffffffffffp+1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fcfffff, 0xfffffffd), 0x7fdfffff, 0xfffffffd, STR(__LINE__)); // 0x1p+1 * 0x1.ffffffffffffdp+1021 = 0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0x7fcfffff, 0xfffffffd) * double_of_bits(0x40000000, 0x00000000), 0x7fdfffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1021 * 0x1p+1 = 0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0xffcfffff, 0xfffffffd), 0x7fdfffff, 0xfffffffd, STR(__LINE__)); // -0x1p+1 * -0x1.ffffffffffffdp+1021 = 0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0xffcfffff, 0xfffffffd) * double_of_bits(0xc0000000, 0x00000000), 0x7fdfffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp+1021 * -0x1p+1 = 0x1.ffffffffffffdp+1022
+}
+
+void f155(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0xffcfffff, 0xfffffffd), 0xffdfffff, 0xfffffffd, STR(__LINE__)); // 0x1p+1 * -0x1.ffffffffffffdp+1021 = -0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0xffcfffff, 0xfffffffd) * double_of_bits(0x40000000, 0x00000000), 0xffdfffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp+1021 * 0x1p+1 = -0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7fcfffff, 0xfffffffd), 0xffdfffff, 0xfffffffd, STR(__LINE__)); // -0x1p+1 * 0x1.ffffffffffffdp+1021 = -0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0x7fcfffff, 0xfffffffd) * double_of_bits(0xc0000000, 0x00000000), 0xffdfffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1021 * -0x1p+1 = -0x1.ffffffffffffdp+1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00200000, 0x00000000, STR(__LINE__)); // 0x1p+1 * 0x1p-1022 = 0x1p-1021
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0x00200000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x1p+1 = 0x1p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00200000, 0x00000000, STR(__LINE__)); // -0x1p+1 * -0x1p-1022 = 0x1p-1021
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0x00200000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x1p+1 = 0x1p-1021
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0x80200000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x1p+1 = -0x1p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80200000, 0x00000000, STR(__LINE__)); // -0x1p+1 * 0x1p-1022 = -0x1p-1021
+}
+
+void f156(void) {
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0x80200000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x1p+1 = -0x1p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80200000, 0x00000000, STR(__LINE__)); // 0x1p+1 * -0x1p-1022 = -0x1p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x00200000, 0x00000001, STR(__LINE__)); // 0x1p+1 * 0x1.0000000000001p-1022 = 0x1.0000000000001p-1021
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x40000000, 0x00000000), 0x00200000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1p+1 = 0x1.0000000000001p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x00200000, 0x00000001, STR(__LINE__)); // -0x1p+1 * -0x1.0000000000001p-1022 = 0x1.0000000000001p-1021
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xc0000000, 0x00000000), 0x00200000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1p+1 = 0x1.0000000000001p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x80200000, 0x00000001, STR(__LINE__)); // -0x1p+1 * 0x1.0000000000001p-1022 = -0x1.0000000000001p-1021
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xc0000000, 0x00000000), 0x80200000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1p+1 = -0x1.0000000000001p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x80200000, 0x00000001, STR(__LINE__)); // 0x1p+1 * -0x1.0000000000001p-1022 = -0x1.0000000000001p-1021
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x40000000, 0x00000000), 0x80200000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1p+1 = -0x1.0000000000001p-1021
+}
+
+void f157(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00100000, 0x00000003), 0x00200000, 0x00000003, STR(__LINE__)); // 0x1p+1 * 0x1.0000000000003p-1022 = 0x1.0000000000003p-1021
+ comp64(double_of_bits(0x00100000, 0x00000003) * double_of_bits(0x40000000, 0x00000000), 0x00200000, 0x00000003, STR(__LINE__)); // 0x1.0000000000003p-1022 * 0x1p+1 = 0x1.0000000000003p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80100000, 0x00000003), 0x00200000, 0x00000003, STR(__LINE__)); // -0x1p+1 * -0x1.0000000000003p-1022 = 0x1.0000000000003p-1021
+ comp64(double_of_bits(0x80100000, 0x00000003) * double_of_bits(0xc0000000, 0x00000000), 0x00200000, 0x00000003, STR(__LINE__)); // -0x1.0000000000003p-1022 * -0x1p+1 = 0x1.0000000000003p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80100000, 0x00000003), 0x80200000, 0x00000003, STR(__LINE__)); // 0x1p+1 * -0x1.0000000000003p-1022 = -0x1.0000000000003p-1021
+ comp64(double_of_bits(0x80100000, 0x00000003) * double_of_bits(0x40000000, 0x00000000), 0x80200000, 0x00000003, STR(__LINE__)); // -0x1.0000000000003p-1022 * 0x1p+1 = -0x1.0000000000003p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00100000, 0x00000003), 0x80200000, 0x00000003, STR(__LINE__)); // -0x1p+1 * 0x1.0000000000003p-1022 = -0x1.0000000000003p-1021
+ comp64(double_of_bits(0x00100000, 0x00000003) * double_of_bits(0xc0000000, 0x00000000), 0x80200000, 0x00000003, STR(__LINE__)); // 0x1.0000000000003p-1022 * -0x1p+1 = -0x1.0000000000003p-1021
+}
+
+void f158(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00100000, 0x00000005), 0x00200000, 0x00000005, STR(__LINE__)); // 0x1p+1 * 0x1.0000000000005p-1022 = 0x1.0000000000005p-1021
+ comp64(double_of_bits(0x00100000, 0x00000005) * double_of_bits(0x40000000, 0x00000000), 0x00200000, 0x00000005, STR(__LINE__)); // 0x1.0000000000005p-1022 * 0x1p+1 = 0x1.0000000000005p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80100000, 0x00000005), 0x00200000, 0x00000005, STR(__LINE__)); // -0x1p+1 * -0x1.0000000000005p-1022 = 0x1.0000000000005p-1021
+ comp64(double_of_bits(0x80100000, 0x00000005) * double_of_bits(0xc0000000, 0x00000000), 0x00200000, 0x00000005, STR(__LINE__)); // -0x1.0000000000005p-1022 * -0x1p+1 = 0x1.0000000000005p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80100000, 0x00000005), 0x80200000, 0x00000005, STR(__LINE__)); // 0x1p+1 * -0x1.0000000000005p-1022 = -0x1.0000000000005p-1021
+ comp64(double_of_bits(0x80100000, 0x00000005) * double_of_bits(0x40000000, 0x00000000), 0x80200000, 0x00000005, STR(__LINE__)); // -0x1.0000000000005p-1022 * 0x1p+1 = -0x1.0000000000005p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00100000, 0x00000005), 0x80200000, 0x00000005, STR(__LINE__)); // -0x1p+1 * 0x1.0000000000005p-1022 = -0x1.0000000000005p-1021
+ comp64(double_of_bits(0x00100000, 0x00000005) * double_of_bits(0xc0000000, 0x00000000), 0x80200000, 0x00000005, STR(__LINE__)); // 0x1.0000000000005p-1022 * -0x1p+1 = -0x1.0000000000005p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00100000, 0x00000009), 0x00200000, 0x00000009, STR(__LINE__)); // 0x1p+1 * 0x1.0000000000009p-1022 = 0x1.0000000000009p-1021
+ comp64(double_of_bits(0x00100000, 0x00000009) * double_of_bits(0x40000000, 0x00000000), 0x00200000, 0x00000009, STR(__LINE__)); // 0x1.0000000000009p-1022 * 0x1p+1 = 0x1.0000000000009p-1021
+}
+
+void f159(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80100000, 0x00000009), 0x00200000, 0x00000009, STR(__LINE__)); // -0x1p+1 * -0x1.0000000000009p-1022 = 0x1.0000000000009p-1021
+ comp64(double_of_bits(0x80100000, 0x00000009) * double_of_bits(0xc0000000, 0x00000000), 0x00200000, 0x00000009, STR(__LINE__)); // -0x1.0000000000009p-1022 * -0x1p+1 = 0x1.0000000000009p-1021
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00100000, 0x00000009), 0x80200000, 0x00000009, STR(__LINE__)); // -0x1p+1 * 0x1.0000000000009p-1022 = -0x1.0000000000009p-1021
+ comp64(double_of_bits(0x00100000, 0x00000009) * double_of_bits(0xc0000000, 0x00000000), 0x80200000, 0x00000009, STR(__LINE__)); // 0x1.0000000000009p-1022 * -0x1p+1 = -0x1.0000000000009p-1021
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80100000, 0x00000009), 0x80200000, 0x00000009, STR(__LINE__)); // 0x1p+1 * -0x1.0000000000009p-1022 = -0x1.0000000000009p-1021
+ comp64(double_of_bits(0x80100000, 0x00000009) * double_of_bits(0x40000000, 0x00000000), 0x80200000, 0x00000009, STR(__LINE__)); // -0x1.0000000000009p-1022 * 0x1p+1 = -0x1.0000000000009p-1021
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x7fcfffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1p+2 * 0x1.fffffffffffffp+1021 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0x40100000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * 0x1p+2 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x7fcfffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1p+2 * 0x1.fffffffffffffp+1021 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0xc0100000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * -0x1p+2 = -0x1.fffffffffffffp+1023
+}
+
+void f160(void) {
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0xffefffff, 0xffffffff, STR(__LINE__)); // 0x1p+2 * -0x1.fffffffffffffp+1021 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0x40100000, 0x00000000), 0xffefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * 0x1p+2 = -0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0x7fefffff, 0xffffffff, STR(__LINE__)); // -0x1p+2 * -0x1.fffffffffffffp+1021 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0xc0100000, 0x00000000), 0x7fefffff, 0xffffffff, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * -0x1p+2 = 0x1.fffffffffffffp+1023
+ comp64(double_of_bits(0x7fcfffff, 0xfffffffd) * double_of_bits(0x40100000, 0x00000000), 0x7fefffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1021 * 0x1p+2 = 0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x7fcfffff, 0xfffffffd), 0x7fefffff, 0xfffffffd, STR(__LINE__)); // 0x1p+2 * 0x1.ffffffffffffdp+1021 = 0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0x7fcfffff, 0xfffffffd) * double_of_bits(0xc0100000, 0x00000000), 0xffefffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp+1021 * -0x1p+2 = -0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x7fcfffff, 0xfffffffd), 0xffefffff, 0xfffffffd, STR(__LINE__)); // -0x1p+2 * 0x1.ffffffffffffdp+1021 = -0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0xffcfffff, 0xfffffffd) * double_of_bits(0x40100000, 0x00000000), 0xffefffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp+1021 * 0x1p+2 = -0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0xffcfffff, 0xfffffffd), 0xffefffff, 0xfffffffd, STR(__LINE__)); // 0x1p+2 * -0x1.ffffffffffffdp+1021 = -0x1.ffffffffffffdp+1023
+}
+
+void f161(void) {
+ comp64(double_of_bits(0xffcfffff, 0xfffffffd) * double_of_bits(0xc0100000, 0x00000000), 0x7fefffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp+1021 * -0x1p+2 = 0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0xffcfffff, 0xfffffffd), 0x7fefffff, 0xfffffffd, STR(__LINE__)); // -0x1p+2 * -0x1.ffffffffffffdp+1021 = 0x1.ffffffffffffdp+1023
+ comp64(double_of_bits(0x3f600000, 0x00000000) * double_of_bits(0x40700000, 0x00000000), 0x3fe00000, 0x00000000, STR(__LINE__)); // 0x1p-9 * 0x1p+8 = 0x1p-1
+ comp64(double_of_bits(0x40700000, 0x00000000) * double_of_bits(0x3f600000, 0x00000000), 0x3fe00000, 0x00000000, STR(__LINE__)); // 0x1p+8 * 0x1p-9 = 0x1p-1
+ comp64(double_of_bits(0xbf600000, 0x00000000) * double_of_bits(0xc0700000, 0x00000000), 0x3fe00000, 0x00000000, STR(__LINE__)); // -0x1p-9 * -0x1p+8 = 0x1p-1
+ comp64(double_of_bits(0xc0700000, 0x00000000) * double_of_bits(0xbf600000, 0x00000000), 0x3fe00000, 0x00000000, STR(__LINE__)); // -0x1p+8 * -0x1p-9 = 0x1p-1
+ comp64(double_of_bits(0xbf600000, 0x00000000) * double_of_bits(0x40700000, 0x00000000), 0xbfe00000, 0x00000000, STR(__LINE__)); // -0x1p-9 * 0x1p+8 = -0x1p-1
+ comp64(double_of_bits(0x40700000, 0x00000000) * double_of_bits(0xbf600000, 0x00000000), 0xbfe00000, 0x00000000, STR(__LINE__)); // 0x1p+8 * -0x1p-9 = -0x1p-1
+ comp64(double_of_bits(0xc0700000, 0x00000000) * double_of_bits(0x3f600000, 0x00000000), 0xbfe00000, 0x00000000, STR(__LINE__)); // -0x1p+8 * 0x1p-9 = -0x1p-1
+ comp64(double_of_bits(0x3f600000, 0x00000000) * double_of_bits(0xc0700000, 0x00000000), 0xbfe00000, 0x00000000, STR(__LINE__)); // 0x1p-9 * -0x1p+8 = -0x1p-1
+}
+
+void f162(void) {
+ comp64(double_of_bits(0x3fc00000, 0x00000000) * double_of_bits(0x3f900000, 0x00000000), 0x3f600000, 0x00000000, STR(__LINE__)); // 0x1p-3 * 0x1p-6 = 0x1p-9
+ comp64(double_of_bits(0x3f900000, 0x00000000) * double_of_bits(0x3fc00000, 0x00000000), 0x3f600000, 0x00000000, STR(__LINE__)); // 0x1p-6 * 0x1p-3 = 0x1p-9
+ comp64(double_of_bits(0xbfc00000, 0x00000000) * double_of_bits(0xbf900000, 0x00000000), 0x3f600000, 0x00000000, STR(__LINE__)); // -0x1p-3 * -0x1p-6 = 0x1p-9
+ comp64(double_of_bits(0xbf900000, 0x00000000) * double_of_bits(0xbfc00000, 0x00000000), 0x3f600000, 0x00000000, STR(__LINE__)); // -0x1p-6 * -0x1p-3 = 0x1p-9
+ comp64(double_of_bits(0xbfc00000, 0x00000000) * double_of_bits(0x3f900000, 0x00000000), 0xbf600000, 0x00000000, STR(__LINE__)); // -0x1p-3 * 0x1p-6 = -0x1p-9
+ comp64(double_of_bits(0x3f900000, 0x00000000) * double_of_bits(0xbfc00000, 0x00000000), 0xbf600000, 0x00000000, STR(__LINE__)); // 0x1p-6 * -0x1p-3 = -0x1p-9
+ comp64(double_of_bits(0xbf900000, 0x00000000) * double_of_bits(0x3fc00000, 0x00000000), 0xbf600000, 0x00000000, STR(__LINE__)); // -0x1p-6 * 0x1p-3 = -0x1p-9
+ comp64(double_of_bits(0x3fc00000, 0x00000000) * double_of_bits(0xbf900000, 0x00000000), 0xbf600000, 0x00000000, STR(__LINE__)); // 0x1p-3 * -0x1p-6 = -0x1p-9
+ comp64(double_of_bits(0x40200000, 0x00000000) * double_of_bits(0x40800000, 0x00000000), 0x40b00000, 0x00000000, STR(__LINE__)); // 0x1p+3 * 0x1p+9 = 0x1p+12
+ comp64(double_of_bits(0x40800000, 0x00000000) * double_of_bits(0x40200000, 0x00000000), 0x40b00000, 0x00000000, STR(__LINE__)); // 0x1p+9 * 0x1p+3 = 0x1p+12
+}
+
+void f163(void) {
+ comp64(double_of_bits(0xc0200000, 0x00000000) * double_of_bits(0xc0800000, 0x00000000), 0x40b00000, 0x00000000, STR(__LINE__)); // -0x1p+3 * -0x1p+9 = 0x1p+12
+ comp64(double_of_bits(0xc0800000, 0x00000000) * double_of_bits(0xc0200000, 0x00000000), 0x40b00000, 0x00000000, STR(__LINE__)); // -0x1p+9 * -0x1p+3 = 0x1p+12
+ comp64(double_of_bits(0xc0200000, 0x00000000) * double_of_bits(0x40800000, 0x00000000), 0xc0b00000, 0x00000000, STR(__LINE__)); // -0x1p+3 * 0x1p+9 = -0x1p+12
+ comp64(double_of_bits(0x40800000, 0x00000000) * double_of_bits(0xc0200000, 0x00000000), 0xc0b00000, 0x00000000, STR(__LINE__)); // 0x1p+9 * -0x1p+3 = -0x1p+12
+ comp64(double_of_bits(0xc0800000, 0x00000000) * double_of_bits(0x40200000, 0x00000000), 0xc0b00000, 0x00000000, STR(__LINE__)); // -0x1p+9 * 0x1p+3 = -0x1p+12
+ comp64(double_of_bits(0x40200000, 0x00000000) * double_of_bits(0xc0800000, 0x00000000), 0xc0b00000, 0x00000000, STR(__LINE__)); // 0x1p+3 * -0x1p+9 = -0x1p+12
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0p+0 = 0x0p+0
+}
+
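+// The zero-product cases below pair a (signed) zero with subnormal, normal,
+// and near-maximal finite values.  The mathematically exact product is zero,
+// and IEEE 754 assigns it the XOR of the operand sign bits: +0 when the
+// signs agree, -0 when they differ, independent of the finite factor's
+// magnitude.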
+void f164(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0.0000000000002p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 * 0x0p+0 = 0x0p+0
+}
+
+void f165(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0.0000000000002p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000002p-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x0.0000000000002p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000002p-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x0.0000000000002p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0.0000000000003p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0.0000000000003p-1022 = 0x0p+0
+}
+
+void f166(void) {
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x0.0000000000003p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x0.0000000000003p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00000000, 0x00000004), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0.0000000000004p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000004) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000004p-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80000000, 0x00000004), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0.0000000000004p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000004) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000004) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000004p-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00000000, 0x00000004), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x0.0000000000004p-1022 = -0x0p+0
+}
+
+void f167(void) {
+ comp64(double_of_bits(0x80000000, 0x00000004) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80000000, 0x00000004), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x0.0000000000004p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x0.fffffffffffffp-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+0 = 0x0p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+0 * 0x0p+0 = 0x0p+0
+}
+
+void f168(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+0 = 0x0p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+0 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p+0 = -0x0p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p+0 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+0 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p+0 = -0x0p+0
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+1 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p+1 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x40080000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.8p+1 = 0x0p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 * 0x0p+0 = 0x0p+0
+}
+
+void f169(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.8p+1 = 0x0p+0
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.8p+1 = -0x0p+0
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x40080000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.8p+1 = -0x0p+0
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.8p+1 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+2 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x40100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+2 = 0x0p+0
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+2 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+2 = 0x0p+0
+}
+
+void f170(void) {
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+2 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc0100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p+2 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x40100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p+2 = -0x0p+0
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p+2 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x40140000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.4p+2 = 0x0p+0
+ comp64(double_of_bits(0xc0140000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xc0140000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.4p+2 = 0x0p+0
+ comp64(double_of_bits(0xc0140000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.4p+2 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc0140000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.4p+2 = -0x0p+0
+}
+
+void f171(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x40140000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.4p+2 = -0x0p+0
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x40180000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.8p+2 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x40180000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.8p+2 = 0x0p+0
+ comp64(double_of_bits(0xc0180000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.8p+2 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xc0180000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.8p+2 = 0x0p+0
+ comp64(double_of_bits(0xc0180000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.8p+2 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc0180000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.8p+2 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x40180000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.8p+2 = -0x0p+0
+ comp64(double_of_bits(0x40180000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.8p+2 * -0x0p+0 = -0x0p+0
+}
+
+void f172(void) {
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x401c0000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.cp+2 = 0x0p+0
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.cp+2 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xc01c0000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.cp+2 = 0x0p+0
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.cp+2 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc01c0000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.cp+2 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x401c0000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.cp+2 = -0x0p+0
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x40200000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+3 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x40200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+3 = 0x0p+0
+}
+
+void f173(void) {
+ comp64(double_of_bits(0xc0200000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+3 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xc0200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+3 = 0x0p+0
+ comp64(double_of_bits(0xc0200000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+3 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xc0200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p+3 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x40200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p+3 = -0x0p+0
+ comp64(double_of_bits(0x40200000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p+3 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+1023 = 0x0p+0
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xffe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+1023 = 0x0p+0
+}
+
+void f174(void) {
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xffe00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p+1023 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p+1023 = -0x0p+0
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x7fd00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p+1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+1022 = 0x0p+0
+ comp64(double_of_bits(0xffd00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p+1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xffd00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+1022 = 0x0p+0
+ comp64(double_of_bits(0xffd00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p+1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xffd00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p+1022 = -0x0p+0
+}
+
+void f175(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7fd00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p+1022 = -0x0p+0
+ comp64(double_of_bits(0x7fd00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p+1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7fdfffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffffffffffp+1022 = 0x0p+0
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xffdfffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffffffffffp+1022 = 0x0p+0
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xffdfffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffffffffffp+1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7fdfffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffffffffffp+1022 = -0x0p+0
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * -0x0p+0 = -0x0p+0
+}
+
+void f176(void) {
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7fcfffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffffffffffp+1021 = 0x0p+0
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffffffffffp+1021 = 0x0p+0
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffffffffffp+1021 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7fcfffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffffffffffp+1021 = -0x0p+0
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7fefffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffffffffffp+1023 = 0x0p+0
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 * 0x0p+0 = 0x0p+0
+}
+
+void f177(void) {
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xffefffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffffffffffp+1023 = 0x0p+0
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xffefffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffffffffffp+1023 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7fefffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffffffffffp+1023 = -0x0p+0
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-1022 = 0x0p+0
+}
+
+void f178(void) {
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-1021 = 0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-1021 = 0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1p-1021 = -0x0p+0
+}
+
+void f179(void) {
+ comp64(double_of_bits(0x00200000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1p-1021 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x001fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x801fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffffffffffp-1022 = -0x0p+0
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffffffffffp-1022 = -0x0p+0
+}
+
+void f180(void) {
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x00000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x80000000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x0p+0 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x00000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -0x1.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x80000000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x0p+0 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0p+0 * 0x1.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // inf * 0x0p+0 = nan
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 * inf = nan
+}
+
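+// inf * 0, in either order and with any sign combination, is an invalid
+// operation; the expected result here is the default quiet NaN pattern
+// 0x7ff8000000000000.  IEEE 754 leaves the sign of that NaN unspecified, so
+// the cases below additionally pin down one behaviour: the sign bit follows
+// the XOR of the operand signs, and a NaN operand propagates unchanged.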
+void f181(void) {
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 * -inf = nan
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -inf * -0x0p+0 = nan
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 * inf = -nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // inf * -0x0p+0 = -nan
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 * -inf = -nan
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0xfff80000, 0x00000000, STR(__LINE__)); // -inf * 0x0p+0 = -nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x00000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * 0x0p+0 = nan
+ comp64(double_of_bits(0x00000000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0p+0 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x80000000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * -0x0p+0 = nan
+ comp64(double_of_bits(0x80000000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0p+0 * nan = nan
+}
+
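+// From here on one factor is an infinity and the other is nonzero (finite,
+// subnormal, or itself infinite).  The product is then an infinity whose
+// sign bit is the XOR of the operand sign bits; no NaN arises because the
+// zero-times-infinity cases were handled above.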
+void f182(void) {
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -inf = inf
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x0.0000000000001p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x0.0000000000001p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x0.0000000000001p-1022 = -inf
+}
+
+void f183(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x0.0000000000001p-1022 = -inf
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -inf = -inf
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x0.0000000000002p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000002p-1022 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x0.0000000000002p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000002p-1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x0.0000000000002p-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x0.0000000000002p-1022 = -inf
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000002p-1022 * -inf = -inf
+}
+
+void f184(void) {
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x0.0000000000003p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x0.0000000000003p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000003p-1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x0.0000000000003p-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x0.0000000000003p-1022 = -inf
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000003p-1022 * -inf = -inf
+ comp64(double_of_bits(0x00000000, 0x00000004) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000004p-1022 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000004), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x0.0000000000004p-1022 = inf
+}
+
+void f185(void) {
+ comp64(double_of_bits(0x80000000, 0x00000004) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000004), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x0.0000000000004p-1022 = inf
+ comp64(double_of_bits(0x80000000, 0x00000004) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.0000000000004p-1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80000000, 0x00000004), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x0.0000000000004p-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x00000000, 0x00000004), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x0.0000000000004p-1022 = -inf
+ comp64(double_of_bits(0x00000000, 0x00000004) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.0000000000004p-1022 * -inf = -inf
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x0.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x0.fffffffffffffp-1022 = inf
+}
+
+void f186(void) {
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x0.fffffffffffffp-1022 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x0.fffffffffffffp-1022 = -inf
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * -inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x1p+0 = inf
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+0 * inf = inf
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xc0000000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1p+1 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1.8p+1 = -inf
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * inf = -inf
+}
+
+void f187(void) {
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+2 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0xc0100000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x1p+2 = inf
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.4p+2 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x40140000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x1.4p+2 = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x40180000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x1.8p+2 = -inf
+ comp64(double_of_bits(0x40180000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.8p+2 * -inf = -inf
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.cp+2 * -inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x401c0000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -inf * 0x1.cp+2 = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0xc0200000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x1p+3 = inf
+ comp64(double_of_bits(0xc0200000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+3 * -inf = inf
+}
+
+void f188(void) {
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x1p+1023 = inf
+ comp64(double_of_bits(0xffd00000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xffd00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1p+1022 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1p+1023 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * inf = -inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0xffd00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x1p+1022 = inf
+ comp64(double_of_bits(0xffd00000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1022 * -inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x7fdfffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x1.fffffffffffffp+1022 = inf
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * inf = inf
+}
+
+void f189(void) {
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xffcfffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1.fffffffffffffp+1021 = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * inf = -inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0xffefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x1p-1022 = inf
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1p-1021 = -inf
+}
+
+void f190(void) {
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x1p-1022 = inf
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -inf = inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x001fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // inf * 0x1.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * inf = inf
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x7ff00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * inf = -inf
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // inf * -0x1.0000000000001p-1022 = -inf
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0xfff00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * -inf = inf
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x801fffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -inf * -0x1.fffffffffffffp-1022 = inf
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x7ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * inf = nan
+ comp64(double_of_bits(0x7ff00000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // inf * nan = nan
+}
+
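+// A NaN operand makes the product NaN; the expected pattern is the quiet NaN
+// 0x7ff8000000000000, the same bit pattern used as the NaN input here.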
+void f191(void) {
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0xfff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * -inf = nan
+ comp64(double_of_bits(0xfff00000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -inf * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * nan = nan
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * 0x0.fffffffffffffp-1022 = nan
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * -0x0.fffffffffffffp-1022 = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * 0x0.0000000000001p-1022 = nan
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * -0x0.0000000000001p-1022 = nan
+}
+
+void f192(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * 0x1p+0 = nan
+ comp64(double_of_bits(0x3ff00000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x1p+0 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0xbff00000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * -0x1p+0 = nan
+ comp64(double_of_bits(0xbff00000, 0x00000000) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x1p+0 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0x7fefffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * 0x1.fffffffffffffp+1023 = nan
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 * nan = nan
+ comp64(double_of_bits(0x7ff80000, 0x00000000) * double_of_bits(0xffefffff, 0xffffffff), 0x7ff80000, 0x00000000, STR(__LINE__)); // nan * -0x1.fffffffffffffp+1023 = nan
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0x7ff80000, 0x00000000), 0x7ff80000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * nan = nan
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0x40000000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * 0x1p+1 = inf
+}
+
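+// Overflow: once the exact magnitude of a product reaches (2 - 2^-53) * 2^1023,
+// round-to-nearest-even returns an infinity with the sign of the product.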
+void f193(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1 * 0x1p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * 0x1p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000009) * double_of_bits(0x7fefffff, 0xfffffffa), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000009p+1023 * 0x1.ffffffffffffap+1023 = inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffa) * double_of_bits(0x7fe00000, 0x00000009), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffap+1023 * 0x1.0000000000009p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0x7fefffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * 0x1.ffffffffffffep+1023 = inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) * double_of_bits(0x7fe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1023 * 0x1p+1023 = inf
+ comp64(double_of_bits(0xc013ffff, 0xfffffffe) * double_of_bits(0xffe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.3fffffffffffep+2 * -0x1p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0xc013ffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * -0x1.3fffffffffffep+2 = inf
+ comp64(double_of_bits(0xc0220000, 0x00000001) * double_of_bits(0xffe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.2000000000001p+3 * -0x1p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0xc0220000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * -0x1.2000000000001p+3 = inf
+}
+
+void f194(void) {
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0xffe00000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.8p+1 * -0x1p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0xc0080000, 0x00000000), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * -0x1.8p+1 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000005) * double_of_bits(0xffe00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000005p+1023 * -0x1.0000000000001p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000001) * double_of_bits(0xffe00000, 0x00000005), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 * -0x1.0000000000005p+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0xffefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * -0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffd) * double_of_bits(0xffe00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffdp+1023 * -0x1.0000000000001p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000001) * double_of_bits(0xffefffff, 0xfffffffd), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 * -0x1.ffffffffffffdp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffd) * double_of_bits(0xc0080000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffdp+1023 * -0x1.8000000000001p+1 = inf
+ comp64(double_of_bits(0xc0080000, 0x00000001) * double_of_bits(0xffefffff, 0xfffffffd), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.8000000000001p+1 * -0x1.ffffffffffffdp+1023 = inf
+ comp64(double_of_bits(0xc007ffff, 0xfffffffe) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.7fffffffffffep+1 * 0x1p+1023 = -inf
+}
+
+void f195(void) {
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xc007ffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1.7fffffffffffep+1 = -inf
+ comp64(double_of_bits(0xc01bffff, 0xfffffff9) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.bfffffffffff9p+2 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xc01bffff, 0xfffffff9), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1.bfffffffffff9p+2 = -inf
+ comp64(double_of_bits(0xc0220000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.2p+3 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xc0220000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1.2p+3 = -inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffd) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffdp+1023 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xffefffff, 0xfffffffd), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1.ffffffffffffdp+1023 = -inf
+ comp64(double_of_bits(0xffcfffff, 0xfffffff9) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffff9p+1021 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xffcfffff, 0xfffffff9), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1.ffffffffffff9p+1021 = -inf
+ comp64(double_of_bits(0xffdfffff, 0xfffffff7) * double_of_bits(0x7fd00000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffff7p+1022 * 0x1.0000000000001p+1022 = -inf
+}
+
+void f196(void) {
+ comp64(double_of_bits(0x7fd00000, 0x00000001) * double_of_bits(0xffdfffff, 0xfffffff7), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1022 * -0x1.ffffffffffff7p+1022 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xffd00000, 0x00000004), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1.0000000000004p+1022 = -inf
+ comp64(double_of_bits(0xffd00000, 0x00000004) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000004p+1022 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xffd00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1p+1022 = -inf
+ comp64(double_of_bits(0xffd00000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1022 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000000) * double_of_bits(0xffe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1p+1023 * -0x1p+1023 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000000) * double_of_bits(0x7fe00000, 0x00000000), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1p+1023 * 0x1p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000009) * double_of_bits(0xc0180000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000009p+1023 * -0x1.8000000000002p+2 = -inf
+ comp64(double_of_bits(0xc0180000, 0x00000002) * double_of_bits(0x7fe00000, 0x00000009), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.8000000000002p+2 * 0x1.0000000000009p+1023 = -inf
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x7fefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.fffffffffffffp+1023 = inf
+}
+
+void f197(void) {
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 * 0x1.0000000000001p+0 = inf
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xffefffff, 0xffffffff), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.fffffffffffffp+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * -0x1.0000000000001p+0 = inf
+ comp64(double_of_bits(0x3ff00000, 0x00000002) * double_of_bits(0x7fefffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+0 * 0x1.ffffffffffffep+1023 = inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000002), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1023 * 0x1.0000000000002p+0 = inf
+ comp64(double_of_bits(0xbff00000, 0x00000002) * double_of_bits(0xffefffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+0 * -0x1.ffffffffffffep+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000002), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1023 * -0x1.0000000000002p+0 = inf
+ comp64(double_of_bits(0x3ff00000, 0x00000004) * double_of_bits(0x7fefffff, 0xfffffffc), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000004p+0 * 0x1.ffffffffffffcp+1023 = inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffc) * double_of_bits(0x3ff00000, 0x00000004), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp+1023 * 0x1.0000000000004p+0 = inf
+ comp64(double_of_bits(0xbff00000, 0x00000004) * double_of_bits(0xffefffff, 0xfffffffc), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000004p+0 * -0x1.ffffffffffffcp+1023 = inf
+}
+
+void f198(void) {
+ comp64(double_of_bits(0xffefffff, 0xfffffffc) * double_of_bits(0xbff00000, 0x00000004), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp+1023 * -0x1.0000000000004p+0 = inf
+ comp64(double_of_bits(0x3ff00000, 0x00000008) * double_of_bits(0x7fefffff, 0xfffffff8), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000008p+0 * 0x1.ffffffffffff8p+1023 = inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffff8) * double_of_bits(0x3ff00000, 0x00000008), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffff8p+1023 * 0x1.0000000000008p+0 = inf
+ comp64(double_of_bits(0xbff00000, 0x00000008) * double_of_bits(0xffefffff, 0xfffffff8), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000008p+0 * -0x1.ffffffffffff8p+1023 = inf
+ comp64(double_of_bits(0xffefffff, 0xfffffff8) * double_of_bits(0xbff00000, 0x00000008), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffff8p+1023 * -0x1.0000000000008p+0 = inf
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x7fefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1023 * -0x1.0000000000001p+0 = -inf
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * 0x1.0000000000001p+0 = -inf
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xffefffff, 0xffffffff), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.fffffffffffffp+1023 = -inf
+ comp64(double_of_bits(0xbff00000, 0x00000002) * double_of_bits(0x7fefffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+0 * 0x1.ffffffffffffep+1023 = -inf
+}
+
+void f199(void) {
+ comp64(double_of_bits(0x7fefffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+1023 * -0x1.0000000000002p+0 = -inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+1023 * 0x1.0000000000002p+0 = -inf
+ comp64(double_of_bits(0x3ff00000, 0x00000002) * double_of_bits(0xffefffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+0 * -0x1.ffffffffffffep+1023 = -inf
+ comp64(double_of_bits(0xbff00000, 0x00000004) * double_of_bits(0x7fefffff, 0xfffffffc), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000004p+0 * 0x1.ffffffffffffcp+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffffc) * double_of_bits(0xbff00000, 0x00000004), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp+1023 * -0x1.0000000000004p+0 = -inf
+ comp64(double_of_bits(0xffefffff, 0xfffffffc) * double_of_bits(0x3ff00000, 0x00000004), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp+1023 * 0x1.0000000000004p+0 = -inf
+ comp64(double_of_bits(0x3ff00000, 0x00000004) * double_of_bits(0xffefffff, 0xfffffffc), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000004p+0 * -0x1.ffffffffffffcp+1023 = -inf
+ comp64(double_of_bits(0xbff00000, 0x00000008) * double_of_bits(0x7fefffff, 0xfffffff8), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000008p+0 * 0x1.ffffffffffff8p+1023 = -inf
+ comp64(double_of_bits(0x7fefffff, 0xfffffff8) * double_of_bits(0xbff00000, 0x00000008), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffff8p+1023 * -0x1.0000000000008p+0 = -inf
+ comp64(double_of_bits(0xffefffff, 0xfffffff8) * double_of_bits(0x3ff00000, 0x00000008), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffff8p+1023 * 0x1.0000000000008p+0 = -inf
+}
+
+void f200(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000008) * double_of_bits(0xffefffff, 0xfffffff8), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000008p+0 * -0x1.ffffffffffff8p+1023 = -inf
+ comp64(double_of_bits(0x7fdfffff, 0xfffffffd) * double_of_bits(0xc0000000, 0x00000008), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffdp+1022 * -0x1.0000000000008p+1 = -inf
+ comp64(double_of_bits(0xc0000000, 0x00000008) * double_of_bits(0x7fdfffff, 0xfffffffd), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000008p+1 * 0x1.ffffffffffffdp+1022 = -inf
+ comp64(double_of_bits(0x3fffffff, 0xfffffffc) * double_of_bits(0x7fe00000, 0x00000002), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp+0 * 0x1.0000000000002p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000002) * double_of_bits(0x3fffffff, 0xfffffffc), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+1023 * 0x1.ffffffffffffcp+0 = inf
+ comp64(double_of_bits(0xbfffffff, 0xfffffffc) * double_of_bits(0xffe00000, 0x00000002), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp+0 * -0x1.0000000000002p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000002) * double_of_bits(0xbfffffff, 0xfffffffc), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+1023 * -0x1.ffffffffffffcp+0 = inf
+ comp64(double_of_bits(0xbfffffff, 0xfffffffc) * double_of_bits(0x7fe00000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp+0 * 0x1.0000000000002p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000002) * double_of_bits(0xbfffffff, 0xfffffffc), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p+1023 * -0x1.ffffffffffffcp+0 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000002) * double_of_bits(0x3fffffff, 0xfffffffc), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p+1023 * 0x1.ffffffffffffcp+0 = -inf
+}
+
+void f201(void) {
+ comp64(double_of_bits(0x3fffffff, 0xfffffffc) * double_of_bits(0xffe00000, 0x00000002), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp+0 * -0x1.0000000000002p+1023 = -inf
+ comp64(double_of_bits(0x3fffffff, 0xfffffffe) * double_of_bits(0x7fe00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+0 * 0x1.0000000000001p+1023 = inf
+ comp64(double_of_bits(0x7fe00000, 0x00000001) * double_of_bits(0x3fffffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1023 * 0x1.ffffffffffffep+0 = inf
+ comp64(double_of_bits(0xbfffffff, 0xfffffffe) * double_of_bits(0xffe00000, 0x00000001), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+0 * -0x1.0000000000001p+1023 = inf
+ comp64(double_of_bits(0xffe00000, 0x00000001) * double_of_bits(0xbfffffff, 0xfffffffe), 0x7ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 * -0x1.ffffffffffffep+0 = inf
+ comp64(double_of_bits(0xbfffffff, 0xfffffffe) * double_of_bits(0x7fe00000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep+0 * 0x1.0000000000001p+1023 = -inf
+ comp64(double_of_bits(0x7fe00000, 0x00000001) * double_of_bits(0xbfffffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1023 * -0x1.ffffffffffffep+0 = -inf
+ comp64(double_of_bits(0xffe00000, 0x00000001) * double_of_bits(0x3fffffff, 0xfffffffe), 0xfff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1023 * 0x1.ffffffffffffep+0 = -inf
+ comp64(double_of_bits(0x3fffffff, 0xfffffffe) * double_of_bits(0xffe00000, 0x00000001), 0xfff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep+0 * -0x1.0000000000001p+1023 = -inf
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x1p-1022 = 0x0p+0
+}
+
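+// Underflow to zero: when the exact magnitude of a product is below 2^-1075,
+// half of the smallest subnormal 0x0.0000000000001p-1022, round-to-nearest-even
+// returns a zero whose sign is the exclusive-or of the operand signs.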
+void f202(void) {
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x00200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x1p-1021 = 0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 * 0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x1p-1021 = 0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x00200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x1p-1021 = -0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 * -0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x1p-1021 = -0x0p+0
+}
+
+void f203(void) {
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * 0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000000) * double_of_bits(0x00200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 * 0x1p-1021 = 0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * -0x1p-1021 = 0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000000) * double_of_bits(0x00200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1021 * 0x1p-1021 = -0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000000) * double_of_bits(0x80200000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1021 * -0x1p-1021 = -0x0p+0
+ comp64(double_of_bits(0x3c800000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-55 * 0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3c800000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x1p-55 = 0x0p+0
+ comp64(double_of_bits(0xbc800000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-55 * -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbc800000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x1p-55 = 0x0p+0
+ comp64(double_of_bits(0xbc800000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-55 * 0x1p-1022 = -0x0p+0
+}
+
+void f204(void) {
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbc800000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x1p-55 = -0x0p+0
+ comp64(double_of_bits(0x3c800000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-55 * -0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3c800000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x1p-55 = -0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xfffffff7) * double_of_bits(0x00200000, 0x00000003), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffff7p-1022 * 0x1.0000000000003p-1021 = -0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000003) * double_of_bits(0x800fffff, 0xfffffff7), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p-1021 * -0x0.ffffffffffff7p-1022 = -0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xfffffff7) * double_of_bits(0x80200000, 0x00000003), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffff7p-1022 * -0x1.0000000000003p-1021 = -0x0p+0
+ comp64(double_of_bits(0x80200000, 0x00000003) * double_of_bits(0x000fffff, 0xfffffff7), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000003p-1021 * 0x0.ffffffffffff7p-1022 = -0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xfffffff7) * double_of_bits(0x00200000, 0x00000003), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffff7p-1022 * 0x1.0000000000003p-1021 = 0x0p+0
+ comp64(double_of_bits(0x00200000, 0x00000003) * double_of_bits(0x000fffff, 0xfffffff7), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000003p-1021 * 0x0.ffffffffffff7p-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xfffffff7) * double_of_bits(0x80200000, 0x00000003), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffff7p-1022 * -0x1.0000000000003p-1021 = 0x0p+0
+}
+
+void f205(void) {
+ comp64(double_of_bits(0x80200000, 0x00000003) * double_of_bits(0x800fffff, 0xfffffff7), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000003p-1021 * -0x0.ffffffffffff7p-1022 = 0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x000fffff, 0xfffffffe), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * 0x0.ffffffffffffep-1022 = 0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) * double_of_bits(0x000fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffffep-1022 * 0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x800fffff, 0xfffffffe), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * -0x0.ffffffffffffep-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) * double_of_bits(0x800fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffffep-1022 * -0x0.fffffffffffffp-1022 = 0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x000fffff, 0xfffffffe), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * 0x0.ffffffffffffep-1022 = -0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) * double_of_bits(0x800fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffffep-1022 * -0x0.fffffffffffffp-1022 = -0x0p+0
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x800fffff, 0xfffffffe), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * -0x0.ffffffffffffep-1022 = -0x0p+0
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) * double_of_bits(0x000fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffffep-1022 * 0x0.fffffffffffffp-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fc00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p-3 = 0x0p+0
+}
+
+void f206(void) {
+ comp64(double_of_bits(0x3fc00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-3 * 0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfc00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p-3 = 0x0p+0
+ comp64(double_of_bits(0xbfc00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-3 * -0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fc00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p-3 = -0x0p+0
+ comp64(double_of_bits(0x3fc00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-3 * -0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfc00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p-3 = -0x0p+0
+ comp64(double_of_bits(0xbfc00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-3 * 0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x0.0000000000001p-1022 = -0x0p+0
+}
+
+void f207(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x3c8fffff, 0xffffffff) * double_of_bits(0x00100000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-55 * 0x1.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x3c8fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1.fffffffffffffp-55 = 0x0p+0
+ comp64(double_of_bits(0xbc8fffff, 0xffffffff) * double_of_bits(0x80100000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-55 * -0x1.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xbc8fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1.fffffffffffffp-55 = 0x0p+0
+ comp64(double_of_bits(0xbc8fffff, 0xffffffff) * double_of_bits(0x00100000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-55 * 0x1.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xbc8fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1.fffffffffffffp-55 = -0x0p+0
+ comp64(double_of_bits(0x3c8fffff, 0xffffffff) * double_of_bits(0x80100000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-55 * -0x1.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x3c8fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1.fffffffffffffp-55 = -0x0p+0
+ comp64(double_of_bits(0x3c9fffff, 0xffffffff) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-54 * 0x1p-1022 = 0x0p+0
+}
+
+void f208(void) {
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3c9fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x1.fffffffffffffp-54 = 0x0p+0
+ comp64(double_of_bits(0xbc9fffff, 0xffffffff) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-54 * -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbc9fffff, 0xffffffff), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x1.fffffffffffffp-54 = 0x0p+0
+ comp64(double_of_bits(0xbc9fffff, 0xffffffff) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-54 * 0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbc9fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x1.fffffffffffffp-54 = -0x0p+0
+ comp64(double_of_bits(0x3c9fffff, 0xffffffff) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-54 * -0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3c9fffff, 0xffffffff), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x1.fffffffffffffp-54 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fd00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p-2 = 0x0p+0
+ comp64(double_of_bits(0x3fd00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-2 * 0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfd00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p-2 = 0x0p+0
+}
+
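+// Near the bottom of the subnormal range: exact products below 2^-1075 still
+// round to zero, while larger ones round to the nearest multiple of 2^-1074,
+// with ties going to the even significand; the sign rule is unchanged.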
+void f209(void) {
+ comp64(double_of_bits(0xbfd00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-2 * -0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fd00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p-2 = -0x0p+0
+ comp64(double_of_bits(0x3fd00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-2 * -0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0xbfd00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-2 * 0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfd00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p-2 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3ff7ffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.7ffffffffffffp+0 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3ff7ffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.7ffffffffffffp+0 * 0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbff7ffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.7ffffffffffffp+0 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbff7ffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.7ffffffffffffp+0 * -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3ff7ffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.7ffffffffffffp+0 = -0x0.0000000000001p-1022
+}
+
+void f210(void) {
+ comp64(double_of_bits(0x3ff7ffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.7ffffffffffffp+0 * -0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbff7ffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.7ffffffffffffp+0 * 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbff7ffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.7ffffffffffffp+0 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x400bffff, 0xffffffff), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.bffffffffffffp+1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x400bffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // 0x1.bffffffffffffp+1 * 0x0.0000000000001p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc00bffff, 0xffffffff), 0x00000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.bffffffffffffp+1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0xc00bffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // -0x1.bffffffffffffp+1 * -0x0.0000000000001p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x400bffff, 0xffffffff), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.bffffffffffffp+1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x400bffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // 0x1.bffffffffffffp+1 * -0x0.0000000000001p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0xc00bffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // -0x1.bffffffffffffp+1 * 0x0.0000000000001p-1022 = -0x0.0000000000003p-1022
+}
+
+void f211(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc00bffff, 0xffffffff), 0x80000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.bffffffffffffp+1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x3ca00000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-53 * 0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3ca00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * 0x1p-53 = 0x0p+0
+ comp64(double_of_bits(0xbca00000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-53 * -0x1p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbca00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * -0x1p-53 = 0x0p+0
+ comp64(double_of_bits(0xbca00000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-53 * 0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbca00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1022 * -0x1p-53 = -0x0p+0
+ comp64(double_of_bits(0x3ca00000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-53 * -0x1p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3ca00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1022 * 0x1p-53 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p-1 = 0x0p+0
+}
+
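+// Exact half-way cases: 0.5 * 2^-1074 rounds to zero, 1.5 * 2^-1074 rounds up to
+// 0x0.0000000000002p-1022, 2.5 * 2^-1074 rounds down to the same value, and
+// 3.5 * 2^-1074 rounds up to 0x0.0000000000004p-1022; the tie always goes to the
+// neighbour with the even significand.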
+void f212(void) {
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // 0x1p-1 * 0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfe00000, 0x00000000), 0x00000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p-1 = 0x0p+0
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000000, STR(__LINE__)); // -0x1p-1 * -0x0.0000000000001p-1022 = 0x0p+0
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // 0x1p-1 * -0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fe00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p-1 = -0x0p+0
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000000, STR(__LINE__)); // -0x1p-1 * 0x0.0000000000001p-1022 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfe00000, 0x00000000), 0x80000000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p-1 = -0x0p+0
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3ff80000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.8p+0 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x3ff80000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // 0x1.8p+0 * 0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbff80000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.8p+0 = 0x0.0000000000002p-1022
+}
+
+void f213(void) {
+ comp64(double_of_bits(0xbff80000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // -0x1.8p+0 * -0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x3ff80000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // 0x1.8p+0 * -0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3ff80000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.8p+0 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0xbff80000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // -0x1.8p+0 * 0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbff80000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.8p+0 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x40040000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.4p+1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x40040000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // 0x1.4p+1 * 0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc0040000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.4p+1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0xc0040000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // -0x1.4p+1 * -0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x40040000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // 0x1.4p+1 * -0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+}
+
+void f214(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x40040000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.4p+1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0xc0040000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // -0x1.4p+1 * 0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc0040000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.4p+1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x400c0000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.cp+1 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x400c0000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000004, STR(__LINE__)); // 0x1.cp+1 * 0x0.0000000000001p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc00c0000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.cp+1 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0xc00c0000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000004, STR(__LINE__)); // -0x1.cp+1 * -0x0.0000000000001p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x400c0000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000004, STR(__LINE__)); // 0x1.cp+1 * -0x0.0000000000001p-1022 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x400c0000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.cp+1 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0xc00c0000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000004, STR(__LINE__)); // -0x1.cp+1 * 0x0.0000000000001p-1022 = -0x0.0000000000004p-1022
+}
+
+void f215(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc00c0000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.cp+1 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x3ca40000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.4p-53 * 0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3ca40000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 * 0x1.4p-53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbca40000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.4p-53 * -0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbca40000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 * -0x1.4p-53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbca40000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.4p-53 * 0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbca40000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 * -0x1.4p-53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3ca40000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 * 0x1.4p-53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3ca40000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.4p-53 * -0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fe00000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.0000000000001p-1 = 0x0.0000000000001p-1022
+}
+
+void f216(void) {
+ comp64(double_of_bits(0x3fe00000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1 * 0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfe00000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.0000000000001p-1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1 * -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fe00000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.0000000000001p-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1 * -0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1 * 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfe00000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.0000000000001p-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x40040000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // 0x1.4000000000001p+1 * 0x0.0000000000001p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x40040000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.4000000000001p+1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0xc0040000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // -0x1.4000000000001p+1 * -0x0.0000000000001p-1022 = 0x0.0000000000003p-1022
+}
+
+void f217(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc0040000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.4000000000001p+1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0xc0040000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // -0x1.4000000000001p+1 * 0x0.0000000000001p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc0040000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.4000000000001p+1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x40040000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.4000000000001p+1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x40040000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // 0x1.4000000000001p+1 * -0x0.0000000000001p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x3c900000, 0x00000001) * double_of_bits(0x001fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-54 * 0x1.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x3c900000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * 0x1.0000000000001p-54 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbc900000, 0x00000001) * double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-54 * -0x1.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0xbc900000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * -0x1.0000000000001p-54 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbc900000, 0x00000001) * double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-54 * 0x1.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+}
+
+void f218(void) {
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0xbc900000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * -0x1.0000000000001p-54 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0x3c900000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * 0x1.0000000000001p-54 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3c900000, 0x00000001) * double_of_bits(0x801fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-54 * -0x1.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3ca80000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.8p-53 * 0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3ca80000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 * 0x1.8p-53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbca80000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.8p-53 * -0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbca80000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 * -0x1.8p-53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbca80000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.8p-53 * 0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbca80000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 * -0x1.8p-53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3ca80000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 * 0x1.8p-53 = -0x0.0000000000001p-1022
+}
+
+void f219(void) {
+ comp64(double_of_bits(0x3ca80000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.8p-53 * -0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fe80000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.8p-1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fe80000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.8p-1 * 0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfe80000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.8p-1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfe80000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.8p-1 * -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fe80000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.8p-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fe80000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.8p-1 * -0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfe80000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.8p-1 * 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfe80000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.8p-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3cac0000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.cp-53 * 0x1p-1022 = 0x0.0000000000001p-1022
+}
+
+void f220(void) {
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3cac0000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 * 0x1.cp-53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbcac0000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.cp-53 * -0x1p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbcac0000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 * -0x1.cp-53 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbcac0000, 0x00000000) * double_of_bits(0x00100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.cp-53 * 0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbcac0000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1p-1022 * -0x1.cp-53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3cac0000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p-1022 * 0x1.cp-53 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3cac0000, 0x00000000) * double_of_bits(0x80100000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.cp-53 * -0x1p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fe80000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.8000000000001p-1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fe80000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.8000000000001p-1 * 0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfe80000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.8000000000001p-1 = 0x0.0000000000001p-1022
+}
+
+void f221(void) {
+ comp64(double_of_bits(0xbfe80000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.8000000000001p-1 * -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fe80000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.8000000000001p-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fe80000, 0x00000001) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.8000000000001p-1 * -0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfe80000, 0x00000001) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.8000000000001p-1 * 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfe80000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.8000000000001p-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x3fefffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.fffffffffffffp-1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1 * 0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xbfefffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.fffffffffffffp-1 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x0.0000000000001p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x3fefffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.fffffffffffffp-1 = -0x0.0000000000001p-1022
+}
+
+void f222(void) {
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1 * -0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xbfefffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.fffffffffffffp-1 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1 * 0x0.0000000000001p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3c9fffff, 0xffffffff) * double_of_bits(0x001fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-54 * 0x1.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x3c9fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * 0x1.fffffffffffffp-54 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbc9fffff, 0xffffffff) * double_of_bits(0x801fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-54 * -0x1.fffffffffffffp-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0xbc9fffff, 0xffffffff), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * -0x1.fffffffffffffp-54 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbc9fffff, 0xffffffff) * double_of_bits(0x001fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-54 * 0x1.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0xbc9fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * -0x1.fffffffffffffp-54 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0x3c9fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * 0x1.fffffffffffffp-54 = -0x0.0000000000001p-1022
+}
+
+void f223(void) {
+ comp64(double_of_bits(0x3c9fffff, 0xffffffff) * double_of_bits(0x801fffff, 0xffffffff), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-54 * -0x1.fffffffffffffp-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffc) * double_of_bits(0x3ff00000, 0x00000001), 0x000fffff, 0xfffffffd, STR(__LINE__)); // 0x0.ffffffffffffcp-1022 * 0x1.0000000000001p+0 = 0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffffc), 0x000fffff, 0xfffffffd, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x0.ffffffffffffcp-1022 = 0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffc) * double_of_bits(0xbff00000, 0x00000001), 0x000fffff, 0xfffffffd, STR(__LINE__)); // -0x0.ffffffffffffcp-1022 * -0x1.0000000000001p+0 = 0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffffc), 0x000fffff, 0xfffffffd, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x0.ffffffffffffcp-1022 = 0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffc) * double_of_bits(0x3ff00000, 0x00000001), 0x800fffff, 0xfffffffd, STR(__LINE__)); // -0x0.ffffffffffffcp-1022 * 0x1.0000000000001p+0 = -0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffffc), 0x800fffff, 0xfffffffd, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x0.ffffffffffffcp-1022 = -0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffffc), 0x800fffff, 0xfffffffd, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x0.ffffffffffffcp-1022 = -0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffc) * double_of_bits(0xbff00000, 0x00000001), 0x800fffff, 0xfffffffd, STR(__LINE__)); // 0x0.ffffffffffffcp-1022 * -0x1.0000000000001p+0 = -0x0.ffffffffffffdp-1022
+ comp64(double_of_bits(0x3fe80000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x000c0000, 0x00000001, STR(__LINE__)); // 0x1.8p-1 * 0x1.0000000000001p-1022 = 0x0.c000000000001p-1022
+}
+
+void f224(void) {
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x3fe80000, 0x00000000), 0x000c0000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1.8p-1 = 0x0.c000000000001p-1022
+ comp64(double_of_bits(0xbfe80000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x000c0000, 0x00000001, STR(__LINE__)); // -0x1.8p-1 * -0x1.0000000000001p-1022 = 0x0.c000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xbfe80000, 0x00000000), 0x000c0000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1.8p-1 = 0x0.c000000000001p-1022
+ comp64(double_of_bits(0xbfe80000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x800c0000, 0x00000001, STR(__LINE__)); // -0x1.8p-1 * 0x1.0000000000001p-1022 = -0x0.c000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xbfe80000, 0x00000000), 0x800c0000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1.8p-1 = -0x0.c000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x3fe80000, 0x00000000), 0x800c0000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1.8p-1 = -0x0.c000000000001p-1022
+ comp64(double_of_bits(0x3fe80000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x800c0000, 0x00000001, STR(__LINE__)); // 0x1.8p-1 * -0x1.0000000000001p-1022 = -0x0.c000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000001), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x0.ffffffffffffep-1022 * 0x1.0000000000001p+0 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x0.ffffffffffffep-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000001), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x0.ffffffffffffep-1022 * -0x1.0000000000001p+0 = 0x0.fffffffffffffp-1022
+}
+
+void f225(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x0.ffffffffffffep-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000001), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x0.ffffffffffffep-1022 * 0x1.0000000000001p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x0.ffffffffffffep-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x0.ffffffffffffep-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000001), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x0.ffffffffffffep-1022 * -0x1.0000000000001p+0 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffff8) * double_of_bits(0x3ff00000, 0x00000001), 0x000fffff, 0xfffffff9, STR(__LINE__)); // 0x0.ffffffffffff8p-1022 * 0x1.0000000000001p+0 = 0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffff8), 0x000fffff, 0xfffffff9, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x0.ffffffffffff8p-1022 = 0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff8) * double_of_bits(0xbff00000, 0x00000001), 0x000fffff, 0xfffffff9, STR(__LINE__)); // -0x0.ffffffffffff8p-1022 * -0x1.0000000000001p+0 = 0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffff8), 0x000fffff, 0xfffffff9, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x0.ffffffffffff8p-1022 = 0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff8) * double_of_bits(0x3ff00000, 0x00000001), 0x800fffff, 0xfffffff9, STR(__LINE__)); // -0x0.ffffffffffff8p-1022 * 0x1.0000000000001p+0 = -0x0.ffffffffffff9p-1022
+}
+
+void f226(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffff8), 0x800fffff, 0xfffffff9, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x0.ffffffffffff8p-1022 = -0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffff8), 0x800fffff, 0xfffffff9, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x0.ffffffffffff8p-1022 = -0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffff8) * double_of_bits(0xbff00000, 0x00000001), 0x800fffff, 0xfffffff9, STR(__LINE__)); // 0x0.ffffffffffff8p-1022 * -0x1.0000000000001p+0 = -0x0.ffffffffffff9p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffff7) * double_of_bits(0x3ff00000, 0x00000001), 0x000fffff, 0xfffffff8, STR(__LINE__)); // 0x0.ffffffffffff7p-1022 * 0x1.0000000000001p+0 = 0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffff7), 0x000fffff, 0xfffffff8, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x0.ffffffffffff7p-1022 = 0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff7) * double_of_bits(0xbff00000, 0x00000001), 0x000fffff, 0xfffffff8, STR(__LINE__)); // -0x0.ffffffffffff7p-1022 * -0x1.0000000000001p+0 = 0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffff7), 0x000fffff, 0xfffffff8, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x0.ffffffffffff7p-1022 = 0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff7) * double_of_bits(0x3ff00000, 0x00000001), 0x800fffff, 0xfffffff8, STR(__LINE__)); // -0x0.ffffffffffff7p-1022 * 0x1.0000000000001p+0 = -0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x800fffff, 0xfffffff7), 0x800fffff, 0xfffffff8, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x0.ffffffffffff7p-1022 = -0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x000fffff, 0xfffffff7), 0x800fffff, 0xfffffff8, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x0.ffffffffffff7p-1022 = -0x0.ffffffffffff8p-1022
+}
+
+void f227(void) {
+ comp64(double_of_bits(0x000fffff, 0xfffffff7) * double_of_bits(0xbff00000, 0x00000001), 0x800fffff, 0xfffffff8, STR(__LINE__)); // 0x0.ffffffffffff7p-1022 * -0x1.0000000000001p+0 = -0x0.ffffffffffff8p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x3fefffff, 0xfffffffa), 0x000fffff, 0xfffffffe, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1.ffffffffffffap-1 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffa) * double_of_bits(0x00100000, 0x00000001), 0x000fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffap-1 * 0x1.0000000000001p-1022 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xbfefffff, 0xfffffffa), 0x000fffff, 0xfffffffe, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1.ffffffffffffap-1 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffa) * double_of_bits(0x80100000, 0x00000001), 0x000fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffap-1 * -0x1.0000000000001p-1022 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x3fefffff, 0xfffffffa), 0x800fffff, 0xfffffffe, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1.ffffffffffffap-1 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffa) * double_of_bits(0x80100000, 0x00000001), 0x800fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffap-1 * -0x1.0000000000001p-1022 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffa) * double_of_bits(0x00100000, 0x00000001), 0x800fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffap-1 * 0x1.0000000000001p-1022 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xbfefffff, 0xfffffffa), 0x800fffff, 0xfffffffe, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1.ffffffffffffap-1 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) * double_of_bits(0x3fefffff, 0xfffffffc), 0x000fffff, 0xfffffffc, STR(__LINE__)); // 0x0.ffffffffffffep-1022 * 0x1.ffffffffffffcp-1 = 0x0.ffffffffffffcp-1022
+}
+
+void f228(void) {
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) * double_of_bits(0x000fffff, 0xfffffffe), 0x000fffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffffcp-1 * 0x0.ffffffffffffep-1022 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) * double_of_bits(0xbfefffff, 0xfffffffc), 0x000fffff, 0xfffffffc, STR(__LINE__)); // -0x0.ffffffffffffep-1022 * -0x1.ffffffffffffcp-1 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) * double_of_bits(0x800fffff, 0xfffffffe), 0x000fffff, 0xfffffffc, STR(__LINE__)); // -0x1.ffffffffffffcp-1 * -0x0.ffffffffffffep-1022 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffe) * double_of_bits(0x3fefffff, 0xfffffffc), 0x800fffff, 0xfffffffc, STR(__LINE__)); // -0x0.ffffffffffffep-1022 * 0x1.ffffffffffffcp-1 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) * double_of_bits(0x800fffff, 0xfffffffe), 0x800fffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffffcp-1 * -0x0.ffffffffffffep-1022 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) * double_of_bits(0x000fffff, 0xfffffffe), 0x800fffff, 0xfffffffc, STR(__LINE__)); // -0x1.ffffffffffffcp-1 * 0x0.ffffffffffffep-1022 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffe) * double_of_bits(0xbfefffff, 0xfffffffc), 0x800fffff, 0xfffffffc, STR(__LINE__)); // 0x0.ffffffffffffep-1022 * -0x1.ffffffffffffcp-1 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x3fdfffff, 0xffffffff) * double_of_bits(0x00100000, 0x00000003), 0x00080000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-2 * 0x1.0000000000003p-1022 = 0x0.8000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000003) * double_of_bits(0x3fdfffff, 0xffffffff), 0x00080000, 0x00000001, STR(__LINE__)); // 0x1.0000000000003p-1022 * 0x1.fffffffffffffp-2 = 0x0.8000000000001p-1022
+ comp64(double_of_bits(0xbfdfffff, 0xffffffff) * double_of_bits(0x80100000, 0x00000003), 0x00080000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-2 * -0x1.0000000000003p-1022 = 0x0.8000000000001p-1022
+}
+
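+// Note (descriptive comment, added): the cases below probe the boundary between the subnormal
+// and normal ranges; several products (e.g. 0x0.fffffffffffffp-1022 * 0x1.0000000000001p+0)
+// round to exactly the minimum normal 0x1p-1022, while others stay just inside the subnormals.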
+void f229(void) {
+ comp64(double_of_bits(0x80100000, 0x00000003) * double_of_bits(0xbfdfffff, 0xffffffff), 0x00080000, 0x00000001, STR(__LINE__)); // -0x1.0000000000003p-1022 * -0x1.fffffffffffffp-2 = 0x0.8000000000001p-1022
+ comp64(double_of_bits(0xbfdfffff, 0xffffffff) * double_of_bits(0x00100000, 0x00000003), 0x80080000, 0x00000001, STR(__LINE__)); // -0x1.fffffffffffffp-2 * 0x1.0000000000003p-1022 = -0x0.8000000000001p-1022
+ comp64(double_of_bits(0x00100000, 0x00000003) * double_of_bits(0xbfdfffff, 0xffffffff), 0x80080000, 0x00000001, STR(__LINE__)); // 0x1.0000000000003p-1022 * -0x1.fffffffffffffp-2 = -0x0.8000000000001p-1022
+ comp64(double_of_bits(0x80100000, 0x00000003) * double_of_bits(0x3fdfffff, 0xffffffff), 0x80080000, 0x00000001, STR(__LINE__)); // -0x1.0000000000003p-1022 * 0x1.fffffffffffffp-2 = -0x0.8000000000001p-1022
+ comp64(double_of_bits(0x3fdfffff, 0xffffffff) * double_of_bits(0x80100000, 0x00000003), 0x80080000, 0x00000001, STR(__LINE__)); // 0x1.fffffffffffffp-2 * -0x1.0000000000003p-1022 = -0x0.8000000000001p-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * 0x1.0000000000001p+0 = 0x1p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x000fffff, 0xffffffff), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x0.fffffffffffffp-1022 = 0x1p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * -0x1.0000000000001p+0 = 0x1p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x800fffff, 0xffffffff), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x0.fffffffffffffp-1022 = 0x1p-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * 0x1.0000000000001p+0 = -0x1p-1022
+}
+
+void f230(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x800fffff, 0xffffffff), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x0.fffffffffffffp-1022 = -0x1p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x000fffff, 0xffffffff), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x0.fffffffffffffp-1022 = -0x1p-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * -0x1.0000000000001p+0 = -0x1p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000008) * double_of_bits(0x000fffff, 0xfffffff8), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000008p+0 * 0x0.ffffffffffff8p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffff8) * double_of_bits(0x3ff00000, 0x00000008), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffff8p-1022 * 0x1.0000000000008p+0 = 0x1p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff8) * double_of_bits(0xbff00000, 0x00000008), 0x00100000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffff8p-1022 * -0x1.0000000000008p+0 = 0x1p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000008) * double_of_bits(0x800fffff, 0xfffffff8), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000008p+0 * -0x0.ffffffffffff8p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffff8) * double_of_bits(0x3ff00000, 0x00000008), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.ffffffffffff8p-1022 * 0x1.0000000000008p+0 = -0x1p-1022
+ comp64(double_of_bits(0x3ff00000, 0x00000008) * double_of_bits(0x800fffff, 0xfffffff8), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000008p+0 * -0x0.ffffffffffff8p-1022 = -0x1p-1022
+ comp64(double_of_bits(0xbff00000, 0x00000008) * double_of_bits(0x000fffff, 0xfffffff8), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000008p+0 * 0x0.ffffffffffff8p-1022 = -0x1p-1022
+}
+
+void f231(void) {
+ comp64(double_of_bits(0x000fffff, 0xfffffff8) * double_of_bits(0xbff00000, 0x00000008), 0x80100000, 0x00000000, STR(__LINE__)); // 0x0.ffffffffffff8p-1022 * -0x1.0000000000008p+0 = -0x1p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x3fefffff, 0xfffffffe), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1.ffffffffffffep-1 = 0x1p-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x00100000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep-1 * 0x1.0000000000001p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xbfefffff, 0xfffffffe), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1.ffffffffffffep-1 = 0x1p-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x80100000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep-1 * -0x1.0000000000001p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x3fefffff, 0xfffffffe), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1.ffffffffffffep-1 = -0x1p-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x80100000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep-1 * -0x1.0000000000001p-1022 = -0x1p-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x00100000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep-1 * 0x1.0000000000001p-1022 = -0x1p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xbfefffff, 0xfffffffe), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1.ffffffffffffep-1 = -0x1p-1022
+ comp64(double_of_bits(0x00100000, 0x00000002) * double_of_bits(0x3fefffff, 0xfffffffc), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p-1022 * 0x1.ffffffffffffcp-1 = 0x1p-1022
+}
+
+void f232(void) {
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) * double_of_bits(0x00100000, 0x00000002), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp-1 * 0x1.0000000000002p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x80100000, 0x00000002) * double_of_bits(0xbfefffff, 0xfffffffc), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p-1022 * -0x1.ffffffffffffcp-1 = 0x1p-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) * double_of_bits(0x80100000, 0x00000002), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp-1 * -0x1.0000000000002p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x80100000, 0x00000002) * double_of_bits(0x3fefffff, 0xfffffffc), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.0000000000002p-1022 * 0x1.ffffffffffffcp-1 = -0x1p-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) * double_of_bits(0x80100000, 0x00000002), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffcp-1 * -0x1.0000000000002p-1022 = -0x1p-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) * double_of_bits(0x00100000, 0x00000002), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffcp-1 * 0x1.0000000000002p-1022 = -0x1p-1022
+ comp64(double_of_bits(0x00100000, 0x00000002) * double_of_bits(0xbfefffff, 0xfffffffc), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.0000000000002p-1022 * -0x1.ffffffffffffcp-1 = -0x1p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000002) * double_of_bits(0x001fffff, 0xfffffffb), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.0000000000002p-1 * 0x1.ffffffffffffbp-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffb) * double_of_bits(0x3fe00000, 0x00000002), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffbp-1022 * 0x1.0000000000002p-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000002) * double_of_bits(0x801fffff, 0xfffffffb), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1.0000000000002p-1 * -0x1.ffffffffffffbp-1022 = 0x0.fffffffffffffp-1022
+}
+
+void f233(void) {
+ comp64(double_of_bits(0x801fffff, 0xfffffffb) * double_of_bits(0xbfe00000, 0x00000002), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffbp-1022 * -0x1.0000000000002p-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000002) * double_of_bits(0x001fffff, 0xfffffffb), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1.0000000000002p-1 * 0x1.ffffffffffffbp-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffb) * double_of_bits(0xbfe00000, 0x00000002), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffbp-1022 * -0x1.0000000000002p-1 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffb) * double_of_bits(0x3fe00000, 0x00000002), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffbp-1022 * 0x1.0000000000002p-1 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000002) * double_of_bits(0x801fffff, 0xfffffffb), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1.0000000000002p-1 * -0x1.ffffffffffffbp-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x3fe00000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * 0x1p-1 = 0x1p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x001fffff, 0xffffffff), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1p-1 * 0x1.fffffffffffffp-1022 = 0x1p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0xbfe00000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * -0x1p-1 = 0x1p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x801fffff, 0xffffffff), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1p-1 * -0x1.fffffffffffffp-1022 = 0x1p-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0x3fe00000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * 0x1p-1 = -0x1p-1022
+}
+
+void f234(void) {
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x801fffff, 0xffffffff), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1p-1 * -0x1.fffffffffffffp-1022 = -0x1p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x001fffff, 0xffffffff), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1p-1 * 0x1.fffffffffffffp-1022 = -0x1p-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0xbfe00000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * -0x1p-1 = -0x1p-1022
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0x3fefffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1022 * 0x1.ffffffffffffep-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x00100000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep-1 * 0x1p-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0xbfefffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1022 * -0x1.ffffffffffffep-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x80100000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep-1 * -0x1p-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x80100000, 0x00000000) * double_of_bits(0x3fefffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1022 * 0x1.ffffffffffffep-1 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x80100000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep-1 * -0x1p-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x00100000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep-1 * 0x1p-1022 = -0x0.fffffffffffffp-1022
+}
+
+void f235(void) {
+ comp64(double_of_bits(0x00100000, 0x00000000) * double_of_bits(0xbfefffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1022 * -0x1.ffffffffffffep-1 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffe) * double_of_bits(0x3fe00000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep-1022 * 0x1p-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1 * 0x1.ffffffffffffep-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffe) * double_of_bits(0xbfe00000, 0x00000000), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep-1022 * -0x1p-1 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffffe), 0x000fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1 * -0x1.ffffffffffffep-1022 = 0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffe) * double_of_bits(0x3fe00000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep-1022 * 0x1p-1 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1p-1 * -0x1.ffffffffffffep-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffffe), 0x800fffff, 0xffffffff, STR(__LINE__)); // -0x1p-1 * 0x1.ffffffffffffep-1022 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffe) * double_of_bits(0xbfe00000, 0x00000000), 0x800fffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep-1022 * -0x1p-1 = -0x0.fffffffffffffp-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffc) * double_of_bits(0x3fe00000, 0x00000000), 0x000fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffcp-1022 * 0x1p-1 = 0x0.ffffffffffffep-1022
+}
+
+void f236(void) {
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffffc), 0x000fffff, 0xfffffffe, STR(__LINE__)); // 0x1p-1 * 0x1.ffffffffffffcp-1022 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffc) * double_of_bits(0xbfe00000, 0x00000000), 0x000fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffcp-1022 * -0x1p-1 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffffc), 0x000fffff, 0xfffffffe, STR(__LINE__)); // -0x1p-1 * -0x1.ffffffffffffcp-1022 = 0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffffc) * double_of_bits(0x3fe00000, 0x00000000), 0x800fffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffcp-1022 * 0x1p-1 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffffc), 0x800fffff, 0xfffffffe, STR(__LINE__)); // 0x1p-1 * -0x1.ffffffffffffcp-1022 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffffc), 0x800fffff, 0xfffffffe, STR(__LINE__)); // -0x1p-1 * 0x1.ffffffffffffcp-1022 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffffc) * double_of_bits(0xbfe00000, 0x00000000), 0x800fffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffcp-1022 * -0x1p-1 = -0x0.ffffffffffffep-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffff8) * double_of_bits(0x3fe00000, 0x00000000), 0x000fffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffff8p-1022 * 0x1p-1 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffff8), 0x000fffff, 0xfffffffc, STR(__LINE__)); // 0x1p-1 * 0x1.ffffffffffff8p-1022 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffff8) * double_of_bits(0xbfe00000, 0x00000000), 0x000fffff, 0xfffffffc, STR(__LINE__)); // -0x1.ffffffffffff8p-1022 * -0x1p-1 = 0x0.ffffffffffffcp-1022
+}
+
+void f237(void) {
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffff8), 0x000fffff, 0xfffffffc, STR(__LINE__)); // -0x1p-1 * -0x1.ffffffffffff8p-1022 = 0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x801fffff, 0xfffffff8) * double_of_bits(0x3fe00000, 0x00000000), 0x800fffff, 0xfffffffc, STR(__LINE__)); // -0x1.ffffffffffff8p-1022 * 0x1p-1 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x801fffff, 0xfffffff8), 0x800fffff, 0xfffffffc, STR(__LINE__)); // 0x1p-1 * -0x1.ffffffffffff8p-1022 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x001fffff, 0xfffffff8), 0x800fffff, 0xfffffffc, STR(__LINE__)); // -0x1p-1 * 0x1.ffffffffffff8p-1022 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x001fffff, 0xfffffff8) * double_of_bits(0xbfe00000, 0x00000000), 0x800fffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffff8p-1022 * -0x1p-1 = -0x0.ffffffffffffcp-1022
+ comp64(double_of_bits(0x00000000, 0x00000008) * double_of_bits(0x3fc00000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000008p-1022 * 0x1p-3 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x3fc00000, 0x00000000) * double_of_bits(0x00000000, 0x00000008), 0x00000000, 0x00000001, STR(__LINE__)); // 0x1p-3 * 0x0.0000000000008p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000008) * double_of_bits(0xbfc00000, 0x00000000), 0x00000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000008p-1022 * -0x1p-3 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfc00000, 0x00000000) * double_of_bits(0x80000000, 0x00000008), 0x00000000, 0x00000001, STR(__LINE__)); // -0x1p-3 * -0x0.0000000000008p-1022 = 0x0.0000000000001p-1022
+ comp64(double_of_bits(0x80000000, 0x00000008) * double_of_bits(0x3fc00000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // -0x0.0000000000008p-1022 * 0x1p-3 = -0x0.0000000000001p-1022
+}
+
+void f238(void) {
+ comp64(double_of_bits(0x3fc00000, 0x00000000) * double_of_bits(0x80000000, 0x00000008), 0x80000000, 0x00000001, STR(__LINE__)); // 0x1p-3 * -0x0.0000000000008p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0xbfc00000, 0x00000000) * double_of_bits(0x00000000, 0x00000008), 0x80000000, 0x00000001, STR(__LINE__)); // -0x1p-3 * 0x0.0000000000008p-1022 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000008) * double_of_bits(0xbfc00000, 0x00000000), 0x80000000, 0x00000001, STR(__LINE__)); // 0x0.0000000000008p-1022 * -0x1p-3 = -0x0.0000000000001p-1022
+ comp64(double_of_bits(0x00000000, 0x00000006) * double_of_bits(0x3fe00000, 0x00000000), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000006p-1022 * 0x1p-1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x00000000, 0x00000006), 0x00000000, 0x00000003, STR(__LINE__)); // 0x1p-1 * 0x0.0000000000006p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000006) * double_of_bits(0xbfe00000, 0x00000000), 0x00000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000006p-1022 * -0x1p-1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x80000000, 0x00000006), 0x00000000, 0x00000003, STR(__LINE__)); // -0x1p-1 * -0x0.0000000000006p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000006) * double_of_bits(0x3fe00000, 0x00000000), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000006p-1022 * 0x1p-1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x80000000, 0x00000006), 0x80000000, 0x00000003, STR(__LINE__)); // 0x1p-1 * -0x0.0000000000006p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x00000000, 0x00000006), 0x80000000, 0x00000003, STR(__LINE__)); // -0x1p-1 * 0x0.0000000000006p-1022 = -0x0.0000000000003p-1022
+}
+
+void f239(void) {
+ comp64(double_of_bits(0x00000000, 0x00000006) * double_of_bits(0xbfe00000, 0x00000000), 0x80000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000006p-1022 * -0x1p-1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * 0x1.fffffffffffffp-1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x4007ffff, 0xffffffff), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * 0x1.7ffffffffffffp+1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * -0x1.fffffffffffffp-1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0xc007ffff, 0xffffffff), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x1.7ffffffffffffp+1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * 0x1.fffffffffffffp-1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0xc007ffff, 0xffffffff), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * -0x1.7ffffffffffffp+1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x4007ffff, 0xffffffff), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * 0x1.7ffffffffffffp+1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * -0x1.fffffffffffffp-1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x4013ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0x4013ffff, 0xfffffffe, STR(__LINE__)); // 0x1.3ffffffffffffp+2 * 0x1.fffffffffffffp-1 = 0x1.3fffffffffffep+2
+}
+
+void f240(void) {
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x4013ffff, 0xffffffff), 0x4013ffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * 0x1.3ffffffffffffp+2 = 0x1.3fffffffffffep+2
+ comp64(double_of_bits(0xc013ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0x4013ffff, 0xfffffffe, STR(__LINE__)); // -0x1.3ffffffffffffp+2 * -0x1.fffffffffffffp-1 = 0x1.3fffffffffffep+2
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0xc013ffff, 0xffffffff), 0x4013ffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x1.3ffffffffffffp+2 = 0x1.3fffffffffffep+2
+ comp64(double_of_bits(0xc013ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0xc013ffff, 0xfffffffe, STR(__LINE__)); // -0x1.3ffffffffffffp+2 * 0x1.fffffffffffffp-1 = -0x1.3fffffffffffep+2
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0xc013ffff, 0xffffffff), 0xc013ffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * -0x1.3ffffffffffffp+2 = -0x1.3fffffffffffep+2
+ comp64(double_of_bits(0x4013ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0xc013ffff, 0xfffffffe, STR(__LINE__)); // 0x1.3ffffffffffffp+2 * -0x1.fffffffffffffp-1 = -0x1.3fffffffffffep+2
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x4013ffff, 0xffffffff), 0xc013ffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * 0x1.3ffffffffffffp+2 = -0x1.3fffffffffffep+2
+ comp64(double_of_bits(0x401bffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0x401bffff, 0xfffffffe, STR(__LINE__)); // 0x1.bffffffffffffp+2 * 0x1.fffffffffffffp-1 = 0x1.bfffffffffffep+2
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x401bffff, 0xffffffff), 0x401bffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * 0x1.bffffffffffffp+2 = 0x1.bfffffffffffep+2
+ comp64(double_of_bits(0xc01bffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0x401bffff, 0xfffffffe, STR(__LINE__)); // -0x1.bffffffffffffp+2 * -0x1.fffffffffffffp-1 = 0x1.bfffffffffffep+2
+}
+
+void f241(void) {
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0xc01bffff, 0xffffffff), 0x401bffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x1.bffffffffffffp+2 = 0x1.bfffffffffffep+2
+ comp64(double_of_bits(0xc01bffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0xc01bffff, 0xfffffffe, STR(__LINE__)); // -0x1.bffffffffffffp+2 * 0x1.fffffffffffffp-1 = -0x1.bfffffffffffep+2
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0xc01bffff, 0xffffffff), 0xc01bffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * -0x1.bffffffffffffp+2 = -0x1.bfffffffffffep+2
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x401bffff, 0xffffffff), 0xc01bffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * 0x1.bffffffffffffp+2 = -0x1.bfffffffffffep+2
+ comp64(double_of_bits(0x401bffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0xc01bffff, 0xfffffffe, STR(__LINE__)); // 0x1.bffffffffffffp+2 * -0x1.fffffffffffffp-1 = -0x1.bfffffffffffep+2
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * 0x1.fffffffffffffp-1 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x001fffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * 0x1.fffffffffffffp-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * -0x1.fffffffffffffp-1 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x801fffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x1.fffffffffffffp-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x801fffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1022 * 0x1.fffffffffffffp-1 = -0x1.ffffffffffffep-1022
+}
+
+void f242(void) {
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x801fffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1 * -0x1.fffffffffffffp-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x001fffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * 0x1.fffffffffffffp-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x001fffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x1.fffffffffffffp-1022 * -0x1.fffffffffffffp-1 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x7fcfffff, 0xfffffff9) * double_of_bits(0x400fffff, 0xffffffff), 0x7fefffff, 0xfffffff8, STR(__LINE__)); // 0x1.ffffffffffff9p+1021 * 0x1.fffffffffffffp+1 = 0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0x400fffff, 0xffffffff) * double_of_bits(0x7fcfffff, 0xfffffff9), 0x7fefffff, 0xfffffff8, STR(__LINE__)); // 0x1.fffffffffffffp+1 * 0x1.ffffffffffff9p+1021 = 0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0xffcfffff, 0xfffffff9) * double_of_bits(0xc00fffff, 0xffffffff), 0x7fefffff, 0xfffffff8, STR(__LINE__)); // -0x1.ffffffffffff9p+1021 * -0x1.fffffffffffffp+1 = 0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) * double_of_bits(0xffcfffff, 0xfffffff9), 0x7fefffff, 0xfffffff8, STR(__LINE__)); // -0x1.fffffffffffffp+1 * -0x1.ffffffffffff9p+1021 = 0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0xffcfffff, 0xfffffff9) * double_of_bits(0x400fffff, 0xffffffff), 0xffefffff, 0xfffffff8, STR(__LINE__)); // -0x1.ffffffffffff9p+1021 * 0x1.fffffffffffffp+1 = -0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0x400fffff, 0xffffffff) * double_of_bits(0xffcfffff, 0xfffffff9), 0xffefffff, 0xfffffff8, STR(__LINE__)); // 0x1.fffffffffffffp+1 * -0x1.ffffffffffff9p+1021 = -0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0xc00fffff, 0xffffffff) * double_of_bits(0x7fcfffff, 0xfffffff9), 0xffefffff, 0xfffffff8, STR(__LINE__)); // -0x1.fffffffffffffp+1 * 0x1.ffffffffffff9p+1021 = -0x1.ffffffffffff8p+1023
+}
+
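+// Note (descriptive comment, added): many of the cases that follow check rounding in the last
+// place of normal-range results, e.g. 0x1.0000000000001p+0 squared rounds to
+// 0x1.0000000000002p+0 because the 2^-104 cross term falls below half an ulp and is discarded.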
+void f243(void) {
+ comp64(double_of_bits(0x7fcfffff, 0xfffffff9) * double_of_bits(0xc00fffff, 0xffffffff), 0xffefffff, 0xfffffff8, STR(__LINE__)); // 0x1.ffffffffffff9p+1021 * -0x1.fffffffffffffp+1 = -0x1.ffffffffffff8p+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.0000000000001p+0 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0x3ff00000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.0000000000001p+0 = 0x1.0000000000002p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0xbff00000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.0000000000001p+0 = -0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0xbff00000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.0000000000001p+0 = -0x1.0000000000002p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) * double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p+0 * 0x1.0000000000001p+0 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000002), 0x3ff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.0000000000002p+0 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0xbff00000, 0x00000002) * double_of_bits(0xbff00000, 0x00000001), 0x3ff00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000002p+0 * -0x1.0000000000001p+0 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xbff00000, 0x00000002), 0x3ff00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.0000000000002p+0 = 0x1.0000000000003p+0
+ comp64(double_of_bits(0xbff00000, 0x00000002) * double_of_bits(0x3ff00000, 0x00000001), 0xbff00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000002p+0 * 0x1.0000000000001p+0 = -0x1.0000000000003p+0
+}
+
+void f244(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xbff00000, 0x00000002), 0xbff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.0000000000002p+0 = -0x1.0000000000003p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000002) * double_of_bits(0xbff00000, 0x00000001), 0xbff00000, 0x00000003, STR(__LINE__)); // 0x1.0000000000002p+0 * -0x1.0000000000001p+0 = -0x1.0000000000003p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000002), 0xbff00000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.0000000000002p+0 = -0x1.0000000000003p+0
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0xffefffff, 0xffffffff), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x1.fffffffffffffp+1023 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0xffefffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xffffffff), 0x7fefffff, 0xfffffffe, STR(__LINE__)); // -0x1.fffffffffffffp+1023 * -0x1.fffffffffffffp-1 = 0x1.ffffffffffffep+1023
+ comp64(double_of_bits(0x00080000, 0x00000001) * double_of_bits(0x40000000, 0x00000001), 0x00100000, 0x00000003, STR(__LINE__)); // 0x0.8000000000001p-1022 * 0x1.0000000000001p+1 = 0x1.0000000000003p-1022
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x00080000, 0x00000001), 0x00100000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+1 * 0x0.8000000000001p-1022 = 0x1.0000000000003p-1022
+ comp64(double_of_bits(0x80080000, 0x00000001) * double_of_bits(0xc0000000, 0x00000001), 0x00100000, 0x00000003, STR(__LINE__)); // -0x0.8000000000001p-1022 * -0x1.0000000000001p+1 = 0x1.0000000000003p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x80080000, 0x00000001), 0x00100000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+1 * -0x0.8000000000001p-1022 = 0x1.0000000000003p-1022
+ comp64(double_of_bits(0x80080000, 0x00000001) * double_of_bits(0x40000000, 0x00000001), 0x80100000, 0x00000003, STR(__LINE__)); // -0x0.8000000000001p-1022 * 0x1.0000000000001p+1 = -0x1.0000000000003p-1022
+}
+
+void f245(void) {
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x80080000, 0x00000001), 0x80100000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+1 * -0x0.8000000000001p-1022 = -0x1.0000000000003p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x00080000, 0x00000001), 0x80100000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+1 * 0x0.8000000000001p-1022 = -0x1.0000000000003p-1022
+ comp64(double_of_bits(0x00080000, 0x00000001) * double_of_bits(0xc0000000, 0x00000001), 0x80100000, 0x00000003, STR(__LINE__)); // 0x0.8000000000001p-1022 * -0x1.0000000000001p+1 = -0x1.0000000000003p-1022
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0x40080000, 0x00000000), 0x4021ffff, 0xffffffff, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * 0x1.8p+1 = 0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x4007ffff, 0xffffffff), 0x4021ffff, 0xffffffff, STR(__LINE__)); // 0x1.8p+1 * 0x1.7ffffffffffffp+1 = 0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0xc0080000, 0x00000000), 0x4021ffff, 0xffffffff, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * -0x1.8p+1 = 0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0xc007ffff, 0xffffffff), 0x4021ffff, 0xffffffff, STR(__LINE__)); // -0x1.8p+1 * -0x1.7ffffffffffffp+1 = 0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0x40080000, 0x00000000), 0xc021ffff, 0xffffffff, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * 0x1.8p+1 = -0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0xc007ffff, 0xffffffff), 0xc021ffff, 0xffffffff, STR(__LINE__)); // 0x1.8p+1 * -0x1.7ffffffffffffp+1 = -0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x4007ffff, 0xffffffff), 0xc021ffff, 0xffffffff, STR(__LINE__)); // -0x1.8p+1 * 0x1.7ffffffffffffp+1 = -0x1.1ffffffffffffp+3
+}
+
+void f246(void) {
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0xc0080000, 0x00000000), 0xc021ffff, 0xffffffff, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * -0x1.8p+1 = -0x1.1ffffffffffffp+3
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0x7fc00000, 0x00000001), 0x7fe40000, 0x00000001, STR(__LINE__)); // 0x1.4p+2 * 0x1.0000000000001p+1021 = 0x1.4000000000001p+1023
+ comp64(double_of_bits(0x7fc00000, 0x00000001) * double_of_bits(0x40140000, 0x00000000), 0x7fe40000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1021 * 0x1.4p+2 = 0x1.4000000000001p+1023
+ comp64(double_of_bits(0xc0140000, 0x00000000) * double_of_bits(0xffc00000, 0x00000001), 0x7fe40000, 0x00000001, STR(__LINE__)); // -0x1.4p+2 * -0x1.0000000000001p+1021 = 0x1.4000000000001p+1023
+ comp64(double_of_bits(0xffc00000, 0x00000001) * double_of_bits(0xc0140000, 0x00000000), 0x7fe40000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1021 * -0x1.4p+2 = 0x1.4000000000001p+1023
+ comp64(double_of_bits(0xc0140000, 0x00000000) * double_of_bits(0x7fc00000, 0x00000001), 0xffe40000, 0x00000001, STR(__LINE__)); // -0x1.4p+2 * 0x1.0000000000001p+1021 = -0x1.4000000000001p+1023
+ comp64(double_of_bits(0x7fc00000, 0x00000001) * double_of_bits(0xc0140000, 0x00000000), 0xffe40000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1021 * -0x1.4p+2 = -0x1.4000000000001p+1023
+ comp64(double_of_bits(0xffc00000, 0x00000001) * double_of_bits(0x40140000, 0x00000000), 0xffe40000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1021 * 0x1.4p+2 = -0x1.4000000000001p+1023
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0xffc00000, 0x00000001), 0xffe40000, 0x00000001, STR(__LINE__)); // 0x1.4p+2 * -0x1.0000000000001p+1021 = -0x1.4000000000001p+1023
+ comp64(double_of_bits(0x00240000, 0x00000000) * double_of_bits(0x40000000, 0x00000001), 0x00340000, 0x00000001, STR(__LINE__)); // 0x1.4p-1021 * 0x1.0000000000001p+1 = 0x1.4000000000001p-1020
+}
+
+void f247(void) {
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x00240000, 0x00000000), 0x00340000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1 * 0x1.4p-1021 = 0x1.4000000000001p-1020
+ comp64(double_of_bits(0x80240000, 0x00000000) * double_of_bits(0xc0000000, 0x00000001), 0x00340000, 0x00000001, STR(__LINE__)); // -0x1.4p-1021 * -0x1.0000000000001p+1 = 0x1.4000000000001p-1020
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x80240000, 0x00000000), 0x00340000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1 * -0x1.4p-1021 = 0x1.4000000000001p-1020
+ comp64(double_of_bits(0x80240000, 0x00000000) * double_of_bits(0x40000000, 0x00000001), 0x80340000, 0x00000001, STR(__LINE__)); // -0x1.4p-1021 * 0x1.0000000000001p+1 = -0x1.4000000000001p-1020
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x80240000, 0x00000000), 0x80340000, 0x00000001, STR(__LINE__)); // 0x1.0000000000001p+1 * -0x1.4p-1021 = -0x1.4000000000001p-1020
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x00240000, 0x00000000), 0x80340000, 0x00000001, STR(__LINE__)); // -0x1.0000000000001p+1 * 0x1.4p-1021 = -0x1.4000000000001p-1020
+ comp64(double_of_bits(0x00240000, 0x00000000) * double_of_bits(0xc0000000, 0x00000001), 0x80340000, 0x00000001, STR(__LINE__)); // 0x1.4p-1021 * -0x1.0000000000001p+1 = -0x1.4000000000001p-1020
+ comp64(double_of_bits(0x40140000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0x40140000, 0x00000002, STR(__LINE__)); // 0x1.4000000000001p+2 * 0x1.0000000000001p+0 = 0x1.4000000000002p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x40140000, 0x00000001), 0x40140000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.4000000000001p+2 = 0x1.4000000000002p+2
+ comp64(double_of_bits(0xc0140000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0x40140000, 0x00000002, STR(__LINE__)); // -0x1.4000000000001p+2 * -0x1.0000000000001p+0 = 0x1.4000000000002p+2
+}
+
+void f248(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xc0140000, 0x00000001), 0x40140000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.4000000000001p+2 = 0x1.4000000000002p+2
+ comp64(double_of_bits(0xc0140000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0xc0140000, 0x00000002, STR(__LINE__)); // -0x1.4000000000001p+2 * 0x1.0000000000001p+0 = -0x1.4000000000002p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xc0140000, 0x00000001), 0xc0140000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.4000000000001p+2 = -0x1.4000000000002p+2
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x40140000, 0x00000001), 0xc0140000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.4000000000001p+2 = -0x1.4000000000002p+2
+ comp64(double_of_bits(0x40140000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0xc0140000, 0x00000002, STR(__LINE__)); // 0x1.4000000000001p+2 * -0x1.0000000000001p+0 = -0x1.4000000000002p+2
+ comp64(double_of_bits(0x401bffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffe), 0x401bffff, 0xfffffffd, STR(__LINE__)); // 0x1.bffffffffffffp+2 * 0x1.ffffffffffffep-1 = 0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x401bffff, 0xffffffff), 0x401bffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffep-1 * 0x1.bffffffffffffp+2 = 0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0xc01bffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffe), 0x401bffff, 0xfffffffd, STR(__LINE__)); // -0x1.bffffffffffffp+2 * -0x1.ffffffffffffep-1 = 0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0xc01bffff, 0xffffffff), 0x401bffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffep-1 * -0x1.bffffffffffffp+2 = 0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0xc01bffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffe), 0xc01bffff, 0xfffffffd, STR(__LINE__)); // -0x1.bffffffffffffp+2 * 0x1.ffffffffffffep-1 = -0x1.bfffffffffffdp+2
+}
+
+void f249(void) {
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0xc01bffff, 0xffffffff), 0xc01bffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffep-1 * -0x1.bffffffffffffp+2 = -0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x401bffff, 0xffffffff), 0xc01bffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffep-1 * 0x1.bffffffffffffp+2 = -0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0x401bffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffe), 0xc01bffff, 0xfffffffd, STR(__LINE__)); // 0x1.bffffffffffffp+2 * -0x1.ffffffffffffep-1 = -0x1.bfffffffffffdp+2
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffe), 0x3fefffff, 0xfffffffd, STR(__LINE__)); // 0x1.fffffffffffffp-1 * 0x1.ffffffffffffep-1 = 0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x3fefffff, 0xffffffff), 0x3fefffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffep-1 * 0x1.fffffffffffffp-1 = 0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffe), 0x3fefffff, 0xfffffffd, STR(__LINE__)); // -0x1.fffffffffffffp-1 * -0x1.ffffffffffffep-1 = 0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0xbfefffff, 0xffffffff), 0x3fefffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffep-1 * -0x1.fffffffffffffp-1 = 0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0xbfefffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffe), 0xbfefffff, 0xfffffffd, STR(__LINE__)); // -0x1.fffffffffffffp-1 * 0x1.ffffffffffffep-1 = -0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0xbfefffff, 0xffffffff), 0xbfefffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffep-1 * -0x1.fffffffffffffp-1 = -0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0x3fefffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffe), 0xbfefffff, 0xfffffffd, STR(__LINE__)); // 0x1.fffffffffffffp-1 * -0x1.ffffffffffffep-1 = -0x1.ffffffffffffdp-1
+}
+
+void f250(void) {
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x3fefffff, 0xffffffff), 0xbfefffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffep-1 * 0x1.fffffffffffffp-1 = -0x1.ffffffffffffdp-1
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000001), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * 0x1.0000000000001p+0 = 0x1p+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x7fdfffff, 0xffffffff), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.fffffffffffffp+1022 = 0x1p+1023
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000001), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 * -0x1.0000000000001p+0 = 0x1p+1023
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xffdfffff, 0xffffffff), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.fffffffffffffp+1022 = 0x1p+1023
+ comp64(double_of_bits(0xffdfffff, 0xffffffff) * double_of_bits(0x3ff00000, 0x00000001), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1022 * 0x1.0000000000001p+0 = -0x1p+1023
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xffdfffff, 0xffffffff), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.fffffffffffffp+1022 = -0x1p+1023
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x7fdfffff, 0xffffffff), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.fffffffffffffp+1022 = -0x1p+1023
+ comp64(double_of_bits(0x7fdfffff, 0xffffffff) * double_of_bits(0xbff00000, 0x00000001), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1022 * -0x1.0000000000001p+0 = -0x1p+1023
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000001), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * 0x1.0000000000001p+1 = 0x1p+1023
+}
+
+void f251(void) {
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x7fcfffff, 0xffffffff), 0x7fe00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1 * 0x1.fffffffffffffp+1021 = 0x1p+1023
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000001), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * -0x1.0000000000001p+1 = 0x1p+1023
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0xffcfffff, 0xffffffff), 0x7fe00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1 * -0x1.fffffffffffffp+1021 = 0x1p+1023
+ comp64(double_of_bits(0xffcfffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000001), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1.fffffffffffffp+1021 * 0x1.0000000000001p+1 = -0x1p+1023
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0xffcfffff, 0xffffffff), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+1 * -0x1.fffffffffffffp+1021 = -0x1p+1023
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x7fcfffff, 0xffffffff), 0xffe00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+1 * 0x1.fffffffffffffp+1021 = -0x1p+1023
+ comp64(double_of_bits(0x7fcfffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000001), 0xffe00000, 0x00000000, STR(__LINE__)); // 0x1.fffffffffffffp+1021 * -0x1.0000000000001p+1 = -0x1p+1023
+ comp64(double_of_bits(0x00240000, 0x00000001) * double_of_bits(0x40000000, 0x00000001), 0x00340000, 0x00000002, STR(__LINE__)); // 0x1.4000000000001p-1021 * 0x1.0000000000001p+1 = 0x1.4000000000002p-1020
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x00240000, 0x00000001), 0x00340000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+1 * 0x1.4000000000001p-1021 = 0x1.4000000000002p-1020
+ comp64(double_of_bits(0x80240000, 0x00000001) * double_of_bits(0xc0000000, 0x00000001), 0x00340000, 0x00000002, STR(__LINE__)); // -0x1.4000000000001p-1021 * -0x1.0000000000001p+1 = 0x1.4000000000002p-1020
+}
+
+void f252(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x80240000, 0x00000001), 0x00340000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+1 * -0x1.4000000000001p-1021 = 0x1.4000000000002p-1020
+ comp64(double_of_bits(0x80240000, 0x00000001) * double_of_bits(0x40000000, 0x00000001), 0x80340000, 0x00000002, STR(__LINE__)); // -0x1.4000000000001p-1021 * 0x1.0000000000001p+1 = -0x1.4000000000002p-1020
+ comp64(double_of_bits(0x40000000, 0x00000001) * double_of_bits(0x80240000, 0x00000001), 0x80340000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+1 * -0x1.4000000000001p-1021 = -0x1.4000000000002p-1020
+ comp64(double_of_bits(0xc0000000, 0x00000001) * double_of_bits(0x00240000, 0x00000001), 0x80340000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+1 * 0x1.4000000000001p-1021 = -0x1.4000000000002p-1020
+ comp64(double_of_bits(0x00240000, 0x00000001) * double_of_bits(0xc0000000, 0x00000001), 0x80340000, 0x00000002, STR(__LINE__)); // 0x1.4000000000001p-1021 * -0x1.0000000000001p+1 = -0x1.4000000000002p-1020
+ comp64(double_of_bits(0x40080000, 0x00000004) * double_of_bits(0x401c0000, 0x00000000), 0x40350000, 0x00000004, STR(__LINE__)); // 0x1.8000000000004p+1 * 0x1.cp+2 = 0x1.5000000000004p+4
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0x40080000, 0x00000004), 0x40350000, 0x00000004, STR(__LINE__)); // 0x1.cp+2 * 0x1.8000000000004p+1 = 0x1.5000000000004p+4
+ comp64(double_of_bits(0xc0080000, 0x00000004) * double_of_bits(0xc01c0000, 0x00000000), 0x40350000, 0x00000004, STR(__LINE__)); // -0x1.8000000000004p+1 * -0x1.cp+2 = 0x1.5000000000004p+4
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0xc0080000, 0x00000004), 0x40350000, 0x00000004, STR(__LINE__)); // -0x1.cp+2 * -0x1.8000000000004p+1 = 0x1.5000000000004p+4
+ comp64(double_of_bits(0xc0080000, 0x00000004) * double_of_bits(0x401c0000, 0x00000000), 0xc0350000, 0x00000004, STR(__LINE__)); // -0x1.8000000000004p+1 * 0x1.cp+2 = -0x1.5000000000004p+4
+}
+
+void f253(void) {
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0xc0080000, 0x00000004), 0xc0350000, 0x00000004, STR(__LINE__)); // 0x1.cp+2 * -0x1.8000000000004p+1 = -0x1.5000000000004p+4
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0x40080000, 0x00000004), 0xc0350000, 0x00000004, STR(__LINE__)); // -0x1.cp+2 * 0x1.8000000000004p+1 = -0x1.5000000000004p+4
+ comp64(double_of_bits(0x40080000, 0x00000004) * double_of_bits(0xc01c0000, 0x00000000), 0xc0350000, 0x00000004, STR(__LINE__)); // 0x1.8000000000004p+1 * -0x1.cp+2 = -0x1.5000000000004p+4
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000001), 0x40080000, 0x00000002, STR(__LINE__)); // 0x1.8p+1 * 0x1.0000000000001p+0 = 0x1.8000000000002p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x40080000, 0x00000000), 0x40080000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.8p+1 = 0x1.8000000000002p+1
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0xbff00000, 0x00000001), 0x40080000, 0x00000002, STR(__LINE__)); // -0x1.8p+1 * -0x1.0000000000001p+0 = 0x1.8000000000002p+1
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xc0080000, 0x00000000), 0x40080000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.8p+1 = 0x1.8000000000002p+1
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000001), 0xc0080000, 0x00000002, STR(__LINE__)); // -0x1.8p+1 * 0x1.0000000000001p+0 = -0x1.8000000000002p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xc0080000, 0x00000000), 0xc0080000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.8p+1 = -0x1.8000000000002p+1
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x40080000, 0x00000000), 0xc0080000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.8p+1 = -0x1.8000000000002p+1
+}
+
+void f254(void) {
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0xbff00000, 0x00000001), 0xc0080000, 0x00000002, STR(__LINE__)); // 0x1.8p+1 * -0x1.0000000000001p+0 = -0x1.8000000000002p+1
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x00100000, 0x00000003), 0x00080000, 0x00000002, STR(__LINE__)); // 0x1p-1 * 0x1.0000000000003p-1022 = 0x0.8000000000002p-1022
+ comp64(double_of_bits(0x00100000, 0x00000003) * double_of_bits(0x3fe00000, 0x00000000), 0x00080000, 0x00000002, STR(__LINE__)); // 0x1.0000000000003p-1022 * 0x1p-1 = 0x0.8000000000002p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x80100000, 0x00000003), 0x00080000, 0x00000002, STR(__LINE__)); // -0x1p-1 * -0x1.0000000000003p-1022 = 0x0.8000000000002p-1022
+ comp64(double_of_bits(0x80100000, 0x00000003) * double_of_bits(0xbfe00000, 0x00000000), 0x00080000, 0x00000002, STR(__LINE__)); // -0x1.0000000000003p-1022 * -0x1p-1 = 0x0.8000000000002p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x00100000, 0x00000003), 0x80080000, 0x00000002, STR(__LINE__)); // -0x1p-1 * 0x1.0000000000003p-1022 = -0x0.8000000000002p-1022
+ comp64(double_of_bits(0x00100000, 0x00000003) * double_of_bits(0xbfe00000, 0x00000000), 0x80080000, 0x00000002, STR(__LINE__)); // 0x1.0000000000003p-1022 * -0x1p-1 = -0x0.8000000000002p-1022
+ comp64(double_of_bits(0x80100000, 0x00000003) * double_of_bits(0x3fe00000, 0x00000000), 0x80080000, 0x00000002, STR(__LINE__)); // -0x1.0000000000003p-1022 * 0x1p-1 = -0x0.8000000000002p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x80100000, 0x00000003), 0x80080000, 0x00000002, STR(__LINE__)); // 0x1p-1 * -0x1.0000000000003p-1022 = -0x0.8000000000002p-1022
+ comp64(double_of_bits(0x40080000, 0x0000000c) * double_of_bits(0x401c0000, 0x00000000), 0x40350000, 0x0000000a, STR(__LINE__)); // 0x1.800000000000cp+1 * 0x1.cp+2 = 0x1.500000000000ap+4
+}
+
+void f255(void) {
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0x40080000, 0x0000000c), 0x40350000, 0x0000000a, STR(__LINE__)); // 0x1.cp+2 * 0x1.800000000000cp+1 = 0x1.500000000000ap+4
+ comp64(double_of_bits(0xc0080000, 0x0000000c) * double_of_bits(0xc01c0000, 0x00000000), 0x40350000, 0x0000000a, STR(__LINE__)); // -0x1.800000000000cp+1 * -0x1.cp+2 = 0x1.500000000000ap+4
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0xc0080000, 0x0000000c), 0x40350000, 0x0000000a, STR(__LINE__)); // -0x1.cp+2 * -0x1.800000000000cp+1 = 0x1.500000000000ap+4
+ comp64(double_of_bits(0xc0080000, 0x0000000c) * double_of_bits(0x401c0000, 0x00000000), 0xc0350000, 0x0000000a, STR(__LINE__)); // -0x1.800000000000cp+1 * 0x1.cp+2 = -0x1.500000000000ap+4
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0xc0080000, 0x0000000c), 0xc0350000, 0x0000000a, STR(__LINE__)); // 0x1.cp+2 * -0x1.800000000000cp+1 = -0x1.500000000000ap+4
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0x40080000, 0x0000000c), 0xc0350000, 0x0000000a, STR(__LINE__)); // -0x1.cp+2 * 0x1.800000000000cp+1 = -0x1.500000000000ap+4
+ comp64(double_of_bits(0x40080000, 0x0000000c) * double_of_bits(0xc01c0000, 0x00000000), 0xc0350000, 0x0000000a, STR(__LINE__)); // 0x1.800000000000cp+1 * -0x1.cp+2 = -0x1.500000000000ap+4
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000003), 0x40080000, 0x00000004, STR(__LINE__)); // 0x1.8p+1 * 0x1.0000000000003p+0 = 0x1.8000000000004p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000003) * double_of_bits(0x40080000, 0x00000000), 0x40080000, 0x00000004, STR(__LINE__)); // 0x1.0000000000003p+0 * 0x1.8p+1 = 0x1.8000000000004p+1
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0xbff00000, 0x00000003), 0x40080000, 0x00000004, STR(__LINE__)); // -0x1.8p+1 * -0x1.0000000000003p+0 = 0x1.8000000000004p+1
+}
+
+void f256(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000003) * double_of_bits(0xc0080000, 0x00000000), 0x40080000, 0x00000004, STR(__LINE__)); // -0x1.0000000000003p+0 * -0x1.8p+1 = 0x1.8000000000004p+1
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000003), 0xc0080000, 0x00000004, STR(__LINE__)); // -0x1.8p+1 * 0x1.0000000000003p+0 = -0x1.8000000000004p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000003) * double_of_bits(0xc0080000, 0x00000000), 0xc0080000, 0x00000004, STR(__LINE__)); // 0x1.0000000000003p+0 * -0x1.8p+1 = -0x1.8000000000004p+1
+ comp64(double_of_bits(0xbff00000, 0x00000003) * double_of_bits(0x40080000, 0x00000000), 0xc0080000, 0x00000004, STR(__LINE__)); // -0x1.0000000000003p+0 * 0x1.8p+1 = -0x1.8000000000004p+1
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0xbff00000, 0x00000003), 0xc0080000, 0x00000004, STR(__LINE__)); // 0x1.8p+1 * -0x1.0000000000003p+0 = -0x1.8000000000004p+1
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x00080000, 0x00000000, STR(__LINE__)); // 0x1p-1 * 0x1.0000000000001p-1022 = 0x0.8p-1022
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0x3fe00000, 0x00000000), 0x00080000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * 0x1p-1 = 0x0.8p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x00080000, 0x00000000, STR(__LINE__)); // -0x1p-1 * -0x1.0000000000001p-1022 = 0x0.8p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0xbfe00000, 0x00000000), 0x00080000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * -0x1p-1 = 0x0.8p-1022
+ comp64(double_of_bits(0xbfe00000, 0x00000000) * double_of_bits(0x00100000, 0x00000001), 0x80080000, 0x00000000, STR(__LINE__)); // -0x1p-1 * 0x1.0000000000001p-1022 = -0x0.8p-1022
+}
+
+void f257(void) {
+ comp64(double_of_bits(0x00100000, 0x00000001) * double_of_bits(0xbfe00000, 0x00000000), 0x80080000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p-1022 * -0x1p-1 = -0x0.8p-1022
+ comp64(double_of_bits(0x80100000, 0x00000001) * double_of_bits(0x3fe00000, 0x00000000), 0x80080000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p-1022 * 0x1p-1 = -0x0.8p-1022
+ comp64(double_of_bits(0x3fe00000, 0x00000000) * double_of_bits(0x80100000, 0x00000001), 0x80080000, 0x00000000, STR(__LINE__)); // 0x1p-1 * -0x1.0000000000001p-1022 = -0x0.8p-1022
+ comp64(double_of_bits(0x3ff40000, 0x00000000) * double_of_bits(0x3fffffff, 0xfffffffe), 0x4003ffff, 0xffffffff, STR(__LINE__)); // 0x1.4p+0 * 0x1.ffffffffffffep+0 = 0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0x3fffffff, 0xfffffffe) * double_of_bits(0x3ff40000, 0x00000000), 0x4003ffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep+0 * 0x1.4p+0 = 0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0xbff40000, 0x00000000) * double_of_bits(0xbfffffff, 0xfffffffe), 0x4003ffff, 0xffffffff, STR(__LINE__)); // -0x1.4p+0 * -0x1.ffffffffffffep+0 = 0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0xbfffffff, 0xfffffffe) * double_of_bits(0xbff40000, 0x00000000), 0x4003ffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep+0 * -0x1.4p+0 = 0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0xbff40000, 0x00000000) * double_of_bits(0x3fffffff, 0xfffffffe), 0xc003ffff, 0xffffffff, STR(__LINE__)); // -0x1.4p+0 * 0x1.ffffffffffffep+0 = -0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0x3fffffff, 0xfffffffe) * double_of_bits(0xbff40000, 0x00000000), 0xc003ffff, 0xffffffff, STR(__LINE__)); // 0x1.ffffffffffffep+0 * -0x1.4p+0 = -0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0xbfffffff, 0xfffffffe) * double_of_bits(0x3ff40000, 0x00000000), 0xc003ffff, 0xffffffff, STR(__LINE__)); // -0x1.ffffffffffffep+0 * 0x1.4p+0 = -0x1.3ffffffffffffp+1
+}
+
+void f258(void) {
+ comp64(double_of_bits(0x3ff40000, 0x00000000) * double_of_bits(0xbfffffff, 0xfffffffe), 0xc003ffff, 0xffffffff, STR(__LINE__)); // 0x1.4p+0 * -0x1.ffffffffffffep+0 = -0x1.3ffffffffffffp+1
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000001), 0x401c0000, 0x00000002, STR(__LINE__)); // 0x1.cp+2 * 0x1.0000000000001p+0 = 0x1.c000000000002p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x401c0000, 0x00000000), 0x401c0000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.cp+2 = 0x1.c000000000002p+2
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0xbff00000, 0x00000001), 0x401c0000, 0x00000002, STR(__LINE__)); // -0x1.cp+2 * -0x1.0000000000001p+0 = 0x1.c000000000002p+2
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xc01c0000, 0x00000000), 0x401c0000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.cp+2 = 0x1.c000000000002p+2
+ comp64(double_of_bits(0xc01c0000, 0x00000000) * double_of_bits(0x3ff00000, 0x00000001), 0xc01c0000, 0x00000002, STR(__LINE__)); // -0x1.cp+2 * 0x1.0000000000001p+0 = -0x1.c000000000002p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xc01c0000, 0x00000000), 0xc01c0000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.cp+2 = -0x1.c000000000002p+2
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x401c0000, 0x00000000), 0xc01c0000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.cp+2 = -0x1.c000000000002p+2
+ comp64(double_of_bits(0x401c0000, 0x00000000) * double_of_bits(0xbff00000, 0x00000001), 0xc01c0000, 0x00000002, STR(__LINE__)); // 0x1.cp+2 * -0x1.0000000000001p+0 = -0x1.c000000000002p+2
+ comp64(double_of_bits(0x002c0000, 0x00000000) * double_of_bits(0x40100000, 0x00000001), 0x004c0000, 0x00000002, STR(__LINE__)); // 0x1.cp-1021 * 0x1.0000000000001p+2 = 0x1.c000000000002p-1019
+}
+
+void f259(void) {
+ comp64(double_of_bits(0x40100000, 0x00000001) * double_of_bits(0x002c0000, 0x00000000), 0x004c0000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+2 * 0x1.cp-1021 = 0x1.c000000000002p-1019
+ comp64(double_of_bits(0x802c0000, 0x00000000) * double_of_bits(0xc0100000, 0x00000001), 0x004c0000, 0x00000002, STR(__LINE__)); // -0x1.cp-1021 * -0x1.0000000000001p+2 = 0x1.c000000000002p-1019
+ comp64(double_of_bits(0xc0100000, 0x00000001) * double_of_bits(0x802c0000, 0x00000000), 0x004c0000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+2 * -0x1.cp-1021 = 0x1.c000000000002p-1019
+ comp64(double_of_bits(0x802c0000, 0x00000000) * double_of_bits(0x40100000, 0x00000001), 0x804c0000, 0x00000002, STR(__LINE__)); // -0x1.cp-1021 * 0x1.0000000000001p+2 = -0x1.c000000000002p-1019
+ comp64(double_of_bits(0x40100000, 0x00000001) * double_of_bits(0x802c0000, 0x00000000), 0x804c0000, 0x00000002, STR(__LINE__)); // 0x1.0000000000001p+2 * -0x1.cp-1021 = -0x1.c000000000002p-1019
+ comp64(double_of_bits(0xc0100000, 0x00000001) * double_of_bits(0x002c0000, 0x00000000), 0x804c0000, 0x00000002, STR(__LINE__)); // -0x1.0000000000001p+2 * 0x1.cp-1021 = -0x1.c000000000002p-1019
+ comp64(double_of_bits(0x002c0000, 0x00000000) * double_of_bits(0xc0100000, 0x00000001), 0x804c0000, 0x00000002, STR(__LINE__)); // 0x1.cp-1021 * -0x1.0000000000001p+2 = -0x1.c000000000002p-1019
+ comp64(double_of_bits(0x40080000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0x40080000, 0x00000003, STR(__LINE__)); // 0x1.8000000000001p+1 * 0x1.0000000000001p+0 = 0x1.8000000000003p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x40080000, 0x00000001), 0x40080000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.8000000000001p+1 = 0x1.8000000000003p+1
+ comp64(double_of_bits(0xc0080000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0x40080000, 0x00000003, STR(__LINE__)); // -0x1.8000000000001p+1 * -0x1.0000000000001p+0 = 0x1.8000000000003p+1
+}
+
+void f260(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xc0080000, 0x00000001), 0x40080000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.8000000000001p+1 = 0x1.8000000000003p+1
+ comp64(double_of_bits(0xc0080000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0xc0080000, 0x00000003, STR(__LINE__)); // -0x1.8000000000001p+1 * 0x1.0000000000001p+0 = -0x1.8000000000003p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xc0080000, 0x00000001), 0xc0080000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.8000000000001p+1 = -0x1.8000000000003p+1
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x40080000, 0x00000001), 0xc0080000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.8000000000001p+1 = -0x1.8000000000003p+1
+ comp64(double_of_bits(0x40080000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0xc0080000, 0x00000003, STR(__LINE__)); // 0x1.8000000000001p+1 * -0x1.0000000000001p+0 = -0x1.8000000000003p+1
+ comp64(double_of_bits(0x40080000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000003), 0x40080000, 0x00000006, STR(__LINE__)); // 0x1.8000000000001p+1 * 0x1.0000000000003p+0 = 0x1.8000000000006p+1
+ comp64(double_of_bits(0x3ff00000, 0x00000003) * double_of_bits(0x40080000, 0x00000001), 0x40080000, 0x00000006, STR(__LINE__)); // 0x1.0000000000003p+0 * 0x1.8000000000001p+1 = 0x1.8000000000006p+1
+ comp64(double_of_bits(0xc0080000, 0x00000001) * double_of_bits(0xbff00000, 0x00000003), 0x40080000, 0x00000006, STR(__LINE__)); // -0x1.8000000000001p+1 * -0x1.0000000000003p+0 = 0x1.8000000000006p+1
+ comp64(double_of_bits(0xbff00000, 0x00000003) * double_of_bits(0xc0080000, 0x00000001), 0x40080000, 0x00000006, STR(__LINE__)); // -0x1.0000000000003p+0 * -0x1.8000000000001p+1 = 0x1.8000000000006p+1
+ comp64(double_of_bits(0xc0080000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000003), 0xc0080000, 0x00000006, STR(__LINE__)); // -0x1.8000000000001p+1 * 0x1.0000000000003p+0 = -0x1.8000000000006p+1
+}
+
+void f261(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000003) * double_of_bits(0xc0080000, 0x00000001), 0xc0080000, 0x00000006, STR(__LINE__)); // 0x1.0000000000003p+0 * -0x1.8000000000001p+1 = -0x1.8000000000006p+1
+ comp64(double_of_bits(0xbff00000, 0x00000003) * double_of_bits(0x40080000, 0x00000001), 0xc0080000, 0x00000006, STR(__LINE__)); // -0x1.0000000000003p+0 * 0x1.8000000000001p+1 = -0x1.8000000000006p+1
+ comp64(double_of_bits(0x40080000, 0x00000001) * double_of_bits(0xbff00000, 0x00000003), 0xc0080000, 0x00000006, STR(__LINE__)); // 0x1.8000000000001p+1 * -0x1.0000000000003p+0 = -0x1.8000000000006p+1
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffe), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * 0x1.ffffffffffffep-1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x4007ffff, 0xffffffff), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep-1 * 0x1.7ffffffffffffp+1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffe), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * -0x1.ffffffffffffep-1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0xc007ffff, 0xffffffff), 0x4007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep-1 * -0x1.7ffffffffffffp+1 = 0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffe), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * 0x1.ffffffffffffep-1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0xc007ffff, 0xffffffff), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.ffffffffffffep-1 * -0x1.7ffffffffffffp+1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x4007ffff, 0xffffffff), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // -0x1.ffffffffffffep-1 * 0x1.7ffffffffffffp+1 = -0x1.7fffffffffffep+1
+}
+
+void f262(void) {
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffe), 0xc007ffff, 0xfffffffe, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * -0x1.ffffffffffffep-1 = -0x1.7fffffffffffep+1
+ comp64(double_of_bits(0x401bffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffc), 0x401bffff, 0xfffffffc, STR(__LINE__)); // 0x1.bffffffffffffp+2 * 0x1.ffffffffffffcp-1 = 0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) * double_of_bits(0x401bffff, 0xffffffff), 0x401bffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffffcp-1 * 0x1.bffffffffffffp+2 = 0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0xc01bffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffc), 0x401bffff, 0xfffffffc, STR(__LINE__)); // -0x1.bffffffffffffp+2 * -0x1.ffffffffffffcp-1 = 0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) * double_of_bits(0xc01bffff, 0xffffffff), 0x401bffff, 0xfffffffc, STR(__LINE__)); // -0x1.ffffffffffffcp-1 * -0x1.bffffffffffffp+2 = 0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0xc01bffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffc), 0xc01bffff, 0xfffffffc, STR(__LINE__)); // -0x1.bffffffffffffp+2 * 0x1.ffffffffffffcp-1 = -0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0x3fefffff, 0xfffffffc) * double_of_bits(0xc01bffff, 0xffffffff), 0xc01bffff, 0xfffffffc, STR(__LINE__)); // 0x1.ffffffffffffcp-1 * -0x1.bffffffffffffp+2 = -0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0xbfefffff, 0xfffffffc) * double_of_bits(0x401bffff, 0xffffffff), 0xc01bffff, 0xfffffffc, STR(__LINE__)); // -0x1.ffffffffffffcp-1 * 0x1.bffffffffffffp+2 = -0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0x401bffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffc), 0xc01bffff, 0xfffffffc, STR(__LINE__)); // 0x1.bffffffffffffp+2 * -0x1.ffffffffffffcp-1 = -0x1.bfffffffffffcp+2
+ comp64(double_of_bits(0x00180000, 0x00000001) * double_of_bits(0x40100000, 0x00000001), 0x00380000, 0x00000003, STR(__LINE__)); // 0x1.8000000000001p-1022 * 0x1.0000000000001p+2 = 0x1.8000000000003p-1020
+}
+
+void f263(void) {
+ comp64(double_of_bits(0x40100000, 0x00000001) * double_of_bits(0x00180000, 0x00000001), 0x00380000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+2 * 0x1.8000000000001p-1022 = 0x1.8000000000003p-1020
+ comp64(double_of_bits(0x80180000, 0x00000001) * double_of_bits(0xc0100000, 0x00000001), 0x00380000, 0x00000003, STR(__LINE__)); // -0x1.8000000000001p-1022 * -0x1.0000000000001p+2 = 0x1.8000000000003p-1020
+ comp64(double_of_bits(0xc0100000, 0x00000001) * double_of_bits(0x80180000, 0x00000001), 0x00380000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+2 * -0x1.8000000000001p-1022 = 0x1.8000000000003p-1020
+ comp64(double_of_bits(0x80180000, 0x00000001) * double_of_bits(0x40100000, 0x00000001), 0x80380000, 0x00000003, STR(__LINE__)); // -0x1.8000000000001p-1022 * 0x1.0000000000001p+2 = -0x1.8000000000003p-1020
+ comp64(double_of_bits(0x40100000, 0x00000001) * double_of_bits(0x80180000, 0x00000001), 0x80380000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+2 * -0x1.8000000000001p-1022 = -0x1.8000000000003p-1020
+ comp64(double_of_bits(0xc0100000, 0x00000001) * double_of_bits(0x00180000, 0x00000001), 0x80380000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+2 * 0x1.8000000000001p-1022 = -0x1.8000000000003p-1020
+ comp64(double_of_bits(0x00180000, 0x00000001) * double_of_bits(0xc0100000, 0x00000001), 0x80380000, 0x00000003, STR(__LINE__)); // 0x1.8000000000001p-1022 * -0x1.0000000000001p+2 = -0x1.8000000000003p-1020
+ comp64(double_of_bits(0x401c0000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0x401c0000, 0x00000003, STR(__LINE__)); // 0x1.c000000000001p+2 * 0x1.0000000000001p+0 = 0x1.c000000000003p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x401c0000, 0x00000001), 0x401c0000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.c000000000001p+2 = 0x1.c000000000003p+2
+ comp64(double_of_bits(0xc01c0000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0x401c0000, 0x00000003, STR(__LINE__)); // -0x1.c000000000001p+2 * -0x1.0000000000001p+0 = 0x1.c000000000003p+2
+}
+
+void f264(void) {
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xc01c0000, 0x00000001), 0x401c0000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.c000000000001p+2 = 0x1.c000000000003p+2
+ comp64(double_of_bits(0xc01c0000, 0x00000001) * double_of_bits(0x3ff00000, 0x00000001), 0xc01c0000, 0x00000003, STR(__LINE__)); // -0x1.c000000000001p+2 * 0x1.0000000000001p+0 = -0x1.c000000000003p+2
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xc01c0000, 0x00000001), 0xc01c0000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.c000000000001p+2 = -0x1.c000000000003p+2
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x401c0000, 0x00000001), 0xc01c0000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.c000000000001p+2 = -0x1.c000000000003p+2
+ comp64(double_of_bits(0x401c0000, 0x00000001) * double_of_bits(0xbff00000, 0x00000001), 0xc01c0000, 0x00000003, STR(__LINE__)); // 0x1.c000000000001p+2 * -0x1.0000000000001p+0 = -0x1.c000000000003p+2
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000001), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep-1 * 0x1.0000000000001p+0 = 0x1p+0
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0x3fefffff, 0xfffffffe), 0x3ff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * 0x1.ffffffffffffep-1 = 0x1p+0
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000001), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep-1 * -0x1.0000000000001p+0 = 0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0xbfefffff, 0xfffffffe), 0x3ff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * -0x1.ffffffffffffep-1 = 0x1p+0
+ comp64(double_of_bits(0xbfefffff, 0xfffffffe) * double_of_bits(0x3ff00000, 0x00000001), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.ffffffffffffep-1 * 0x1.0000000000001p+0 = -0x1p+0
+}
+
+void f265(void) {
+ comp64(double_of_bits(0x3ff00000, 0x00000001) * double_of_bits(0xbfefffff, 0xfffffffe), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1.0000000000001p+0 * -0x1.ffffffffffffep-1 = -0x1p+0
+ comp64(double_of_bits(0xbff00000, 0x00000001) * double_of_bits(0x3fefffff, 0xfffffffe), 0xbff00000, 0x00000000, STR(__LINE__)); // -0x1.0000000000001p+0 * 0x1.ffffffffffffep-1 = -0x1p+0
+ comp64(double_of_bits(0x3fefffff, 0xfffffffe) * double_of_bits(0xbff00000, 0x00000001), 0xbff00000, 0x00000000, STR(__LINE__)); // 0x1.ffffffffffffep-1 * -0x1.0000000000001p+0 = -0x1p+0
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffd), 0x4007ffff, 0xfffffffd, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * 0x1.ffffffffffffdp-1 = 0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) * double_of_bits(0x4007ffff, 0xffffffff), 0x4007ffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp-1 * 0x1.7ffffffffffffp+1 = 0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffd), 0x4007ffff, 0xfffffffd, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * -0x1.ffffffffffffdp-1 = 0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffd) * double_of_bits(0xc007ffff, 0xffffffff), 0x4007ffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp-1 * -0x1.7ffffffffffffp+1 = 0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0xc007ffff, 0xffffffff) * double_of_bits(0x3fefffff, 0xfffffffd), 0xc007ffff, 0xfffffffd, STR(__LINE__)); // -0x1.7ffffffffffffp+1 * 0x1.ffffffffffffdp-1 = -0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0x3fefffff, 0xfffffffd) * double_of_bits(0xc007ffff, 0xffffffff), 0xc007ffff, 0xfffffffd, STR(__LINE__)); // 0x1.ffffffffffffdp-1 * -0x1.7ffffffffffffp+1 = -0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0xbfefffff, 0xfffffffd) * double_of_bits(0x4007ffff, 0xffffffff), 0xc007ffff, 0xfffffffd, STR(__LINE__)); // -0x1.ffffffffffffdp-1 * 0x1.7ffffffffffffp+1 = -0x1.7fffffffffffdp+1
+}
+
+void f266(void) {
+ comp64(double_of_bits(0x4007ffff, 0xffffffff) * double_of_bits(0xbfefffff, 0xfffffffd), 0xc007ffff, 0xfffffffd, STR(__LINE__)); // 0x1.7ffffffffffffp+1 * -0x1.ffffffffffffdp-1 = -0x1.7fffffffffffdp+1
+ comp64(double_of_bits(0x002c0000, 0x00000001) * double_of_bits(0x40100000, 0x00000001), 0x004c0000, 0x00000003, STR(__LINE__)); // 0x1.c000000000001p-1021 * 0x1.0000000000001p+2 = 0x1.c000000000003p-1019
+ comp64(double_of_bits(0x40100000, 0x00000001) * double_of_bits(0x002c0000, 0x00000001), 0x004c0000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+2 * 0x1.c000000000001p-1021 = 0x1.c000000000003p-1019
+ comp64(double_of_bits(0x802c0000, 0x00000001) * double_of_bits(0xc0100000, 0x00000001), 0x004c0000, 0x00000003, STR(__LINE__)); // -0x1.c000000000001p-1021 * -0x1.0000000000001p+2 = 0x1.c000000000003p-1019
+ comp64(double_of_bits(0xc0100000, 0x00000001) * double_of_bits(0x802c0000, 0x00000001), 0x004c0000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+2 * -0x1.c000000000001p-1021 = 0x1.c000000000003p-1019
+ comp64(double_of_bits(0x802c0000, 0x00000001) * double_of_bits(0x40100000, 0x00000001), 0x804c0000, 0x00000003, STR(__LINE__)); // -0x1.c000000000001p-1021 * 0x1.0000000000001p+2 = -0x1.c000000000003p-1019
+ comp64(double_of_bits(0x40100000, 0x00000001) * double_of_bits(0x802c0000, 0x00000001), 0x804c0000, 0x00000003, STR(__LINE__)); // 0x1.0000000000001p+2 * -0x1.c000000000001p-1021 = -0x1.c000000000003p-1019
+ comp64(double_of_bits(0xc0100000, 0x00000001) * double_of_bits(0x002c0000, 0x00000001), 0x804c0000, 0x00000003, STR(__LINE__)); // -0x1.0000000000001p+2 * 0x1.c000000000001p-1021 = -0x1.c000000000003p-1019
+ comp64(double_of_bits(0x002c0000, 0x00000001) * double_of_bits(0xc0100000, 0x00000001), 0x804c0000, 0x00000003, STR(__LINE__)); // 0x1.c000000000001p-1021 * -0x1.0000000000001p+2 = -0x1.c000000000003p-1019
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000000), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * 0x1p+1 = 0x1.ffffffffffffep-1022
+}
+
+void f267(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // 0x1p+1 * 0x0.fffffffffffffp-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000000), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * -0x1p+1 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x001fffff, 0xfffffffe, STR(__LINE__)); // -0x1p+1 * -0x0.fffffffffffffp-1022 = 0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x800fffff, 0xffffffff) * double_of_bits(0x40000000, 0x00000000), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x0.fffffffffffffp-1022 * 0x1p+1 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x800fffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x1p+1 * -0x0.fffffffffffffp-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x000fffff, 0xffffffff), 0x801fffff, 0xfffffffe, STR(__LINE__)); // -0x1p+1 * 0x0.fffffffffffffp-1022 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0x000fffff, 0xffffffff) * double_of_bits(0xc0000000, 0x00000000), 0x801fffff, 0xfffffffe, STR(__LINE__)); // 0x0.fffffffffffffp-1022 * -0x1p+1 = -0x1.ffffffffffffep-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x000fffff, 0xfffffffd), 0x801fffff, 0xfffffffa, STR(__LINE__)); // -0x1p+1 * 0x0.ffffffffffffdp-1022 = -0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffd) * double_of_bits(0xc0000000, 0x00000000), 0x801fffff, 0xfffffffa, STR(__LINE__)); // 0x0.ffffffffffffdp-1022 * -0x1p+1 = -0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffd) * double_of_bits(0xc0000000, 0x00000000), 0x001fffff, 0xfffffffa, STR(__LINE__)); // -0x0.ffffffffffffdp-1022 * -0x1p+1 = 0x1.ffffffffffffap-1022
+}
+
+void f268(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x800fffff, 0xfffffffd), 0x001fffff, 0xfffffffa, STR(__LINE__)); // -0x1p+1 * -0x0.ffffffffffffdp-1022 = 0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x800fffff, 0xfffffffd), 0x801fffff, 0xfffffffa, STR(__LINE__)); // 0x1p+1 * -0x0.ffffffffffffdp-1022 = -0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffd) * double_of_bits(0x40000000, 0x00000000), 0x801fffff, 0xfffffffa, STR(__LINE__)); // -0x0.ffffffffffffdp-1022 * 0x1p+1 = -0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffd) * double_of_bits(0x40000000, 0x00000000), 0x001fffff, 0xfffffffa, STR(__LINE__)); // 0x0.ffffffffffffdp-1022 * 0x1p+1 = 0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x000fffff, 0xfffffffd), 0x001fffff, 0xfffffffa, STR(__LINE__)); // 0x1p+1 * 0x0.ffffffffffffdp-1022 = 0x1.ffffffffffffap-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffc) * double_of_bits(0x40000000, 0x00000000), 0x001fffff, 0xfffffff8, STR(__LINE__)); // 0x0.ffffffffffffcp-1022 * 0x1p+1 = 0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x000fffff, 0xfffffffc), 0x001fffff, 0xfffffff8, STR(__LINE__)); // 0x1p+1 * 0x0.ffffffffffffcp-1022 = 0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffc) * double_of_bits(0xc0000000, 0x00000000), 0x001fffff, 0xfffffff8, STR(__LINE__)); // -0x0.ffffffffffffcp-1022 * -0x1p+1 = 0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x800fffff, 0xfffffffc), 0x001fffff, 0xfffffff8, STR(__LINE__)); // -0x1p+1 * -0x0.ffffffffffffcp-1022 = 0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0x000fffff, 0xfffffffc) * double_of_bits(0xc0000000, 0x00000000), 0x801fffff, 0xfffffff8, STR(__LINE__)); // 0x0.ffffffffffffcp-1022 * -0x1p+1 = -0x1.ffffffffffff8p-1022
+}
+
+void f269(void) {
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x000fffff, 0xfffffffc), 0x801fffff, 0xfffffff8, STR(__LINE__)); // -0x1p+1 * 0x0.ffffffffffffcp-1022 = -0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x800fffff, 0xfffffffc), 0x801fffff, 0xfffffff8, STR(__LINE__)); // 0x1p+1 * -0x0.ffffffffffffcp-1022 = -0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0x800fffff, 0xfffffffc) * double_of_bits(0x40000000, 0x00000000), 0x801fffff, 0xfffffff8, STR(__LINE__)); // -0x0.ffffffffffffcp-1022 * 0x1p+1 = -0x1.ffffffffffff8p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p+1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // 0x1p+1 * 0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p+1 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000002, STR(__LINE__)); // -0x1p+1 * -0x0.0000000000001p-1022 = 0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p+1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // -0x1p+1 * 0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000002, STR(__LINE__)); // 0x1p+1 * -0x0.0000000000001p-1022 = -0x0.0000000000002p-1022
+}
+
+void f270(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000002, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p+1 = -0x0.0000000000002p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000002p-1022 * 0x1p+1 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x00000000, 0x00000004, STR(__LINE__)); // 0x1p+1 * 0x0.0000000000002p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000002p-1022 * -0x1p+1 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x00000000, 0x00000004, STR(__LINE__)); // -0x1p+1 * -0x0.0000000000002p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000002p-1022 * -0x1p+1 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x80000000, 0x00000004, STR(__LINE__)); // -0x1p+1 * 0x0.0000000000002p-1022 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000004, STR(__LINE__)); // 0x1p+1 * -0x0.0000000000002p-1022 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000002p-1022 * 0x1p+1 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0x40000000, 0x00000000), 0x00000000, 0x00000006, STR(__LINE__)); // 0x0.0000000000003p-1022 * 0x1p+1 = 0x0.0000000000006p-1022
+}
+
+void f271(void) {
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x00000000, 0x00000006, STR(__LINE__)); // 0x1p+1 * 0x0.0000000000003p-1022 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0xc0000000, 0x00000000), 0x00000000, 0x00000006, STR(__LINE__)); // -0x0.0000000000003p-1022 * -0x1p+1 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x00000000, 0x00000006, STR(__LINE__)); // -0x1p+1 * -0x0.0000000000003p-1022 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0xc0000000, 0x00000000), 0x80000000, 0x00000006, STR(__LINE__)); // 0x0.0000000000003p-1022 * -0x1p+1 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0xc0000000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x80000000, 0x00000006, STR(__LINE__)); // -0x1p+1 * 0x0.0000000000003p-1022 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0x40000000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x80000000, 0x00000006, STR(__LINE__)); // 0x1p+1 * -0x0.0000000000003p-1022 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0x40000000, 0x00000000), 0x80000000, 0x00000006, STR(__LINE__)); // -0x0.0000000000003p-1022 * 0x1p+1 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // 0x1.8p+1 * 0x0.0000000000001p-1022 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x40080000, 0x00000000), 0x00000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.8p+1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000003, STR(__LINE__)); // -0x1.8p+1 * -0x0.0000000000001p-1022 = 0x0.0000000000003p-1022
+}
+
+void f272(void) {
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc0080000, 0x00000000), 0x00000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.8p+1 = 0x0.0000000000003p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // 0x1.8p+1 * -0x0.0000000000001p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x40080000, 0x00000000), 0x80000000, 0x00000003, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.8p+1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc0080000, 0x00000000), 0x80000000, 0x00000003, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.8p+1 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000003, STR(__LINE__)); // -0x1.8p+1 * 0x0.0000000000001p-1022 = -0x0.0000000000003p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x00000000, 0x00000006, STR(__LINE__)); // 0x1.8p+1 * 0x0.0000000000002p-1022 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x40080000, 0x00000000), 0x00000000, 0x00000006, STR(__LINE__)); // 0x0.0000000000002p-1022 * 0x1.8p+1 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x00000000, 0x00000006, STR(__LINE__)); // -0x1.8p+1 * -0x0.0000000000002p-1022 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0xc0080000, 0x00000000), 0x00000000, 0x00000006, STR(__LINE__)); // -0x0.0000000000002p-1022 * -0x1.8p+1 = 0x0.0000000000006p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000006, STR(__LINE__)); // 0x1.8p+1 * -0x0.0000000000002p-1022 = -0x0.0000000000006p-1022
+}
+
+void f273(void) {
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x40080000, 0x00000000), 0x80000000, 0x00000006, STR(__LINE__)); // -0x0.0000000000002p-1022 * 0x1.8p+1 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0xc0080000, 0x00000000), 0x80000000, 0x00000006, STR(__LINE__)); // 0x0.0000000000002p-1022 * -0x1.8p+1 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x80000000, 0x00000006, STR(__LINE__)); // -0x1.8p+1 * 0x0.0000000000002p-1022 = -0x0.0000000000006p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x00000000, 0x00000009, STR(__LINE__)); // 0x1.8p+1 * 0x0.0000000000003p-1022 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0x40080000, 0x00000000), 0x00000000, 0x00000009, STR(__LINE__)); // 0x0.0000000000003p-1022 * 0x1.8p+1 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x00000000, 0x00000009, STR(__LINE__)); // -0x1.8p+1 * -0x0.0000000000003p-1022 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0xc0080000, 0x00000000), 0x00000000, 0x00000009, STR(__LINE__)); // -0x0.0000000000003p-1022 * -0x1.8p+1 = 0x0.0000000000009p-1022
+ comp64(double_of_bits(0x40080000, 0x00000000) * double_of_bits(0x80000000, 0x00000003), 0x80000000, 0x00000009, STR(__LINE__)); // 0x1.8p+1 * -0x0.0000000000003p-1022 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x80000000, 0x00000003) * double_of_bits(0x40080000, 0x00000000), 0x80000000, 0x00000009, STR(__LINE__)); // -0x0.0000000000003p-1022 * 0x1.8p+1 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x00000000, 0x00000003) * double_of_bits(0xc0080000, 0x00000000), 0x80000000, 0x00000009, STR(__LINE__)); // 0x0.0000000000003p-1022 * -0x1.8p+1 = -0x0.0000000000009p-1022
+}
+
+void f274(void) {
+ comp64(double_of_bits(0xc0080000, 0x00000000) * double_of_bits(0x00000000, 0x00000003), 0x80000000, 0x00000009, STR(__LINE__)); // -0x1.8p+1 * 0x0.0000000000003p-1022 = -0x0.0000000000009p-1022
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000004, STR(__LINE__)); // 0x1p+2 * 0x0.0000000000001p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x40100000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p+2 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000004, STR(__LINE__)); // 0x1p+2 * -0x0.0000000000001p-1022 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x40100000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p+2 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000004, STR(__LINE__)); // -0x1p+2 * -0x0.0000000000001p-1022 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000004, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p+2 = 0x0.0000000000004p-1022
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000004, STR(__LINE__)); // -0x1p+2 * 0x0.0000000000001p-1022 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc0100000, 0x00000000), 0x80000000, 0x00000004, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p+2 = -0x0.0000000000004p-1022
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x00000000, 0x00000008, STR(__LINE__)); // 0x1p+2 * 0x0.0000000000002p-1022 = 0x0.0000000000008p-1022
+}
+
+void f275(void) {
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0x40100000, 0x00000000), 0x00000000, 0x00000008, STR(__LINE__)); // 0x0.0000000000002p-1022 * 0x1p+2 = 0x0.0000000000008p-1022
+ comp64(double_of_bits(0x40100000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x80000000, 0x00000008, STR(__LINE__)); // 0x1p+2 * -0x0.0000000000002p-1022 = -0x0.0000000000008p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0x40100000, 0x00000000), 0x80000000, 0x00000008, STR(__LINE__)); // -0x0.0000000000002p-1022 * 0x1p+2 = -0x0.0000000000008p-1022
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x80000000, 0x00000002), 0x00000000, 0x00000008, STR(__LINE__)); // -0x1p+2 * -0x0.0000000000002p-1022 = 0x0.0000000000008p-1022
+ comp64(double_of_bits(0x80000000, 0x00000002) * double_of_bits(0xc0100000, 0x00000000), 0x00000000, 0x00000008, STR(__LINE__)); // -0x0.0000000000002p-1022 * -0x1p+2 = 0x0.0000000000008p-1022
+ comp64(double_of_bits(0xc0100000, 0x00000000) * double_of_bits(0x00000000, 0x00000002), 0x80000000, 0x00000008, STR(__LINE__)); // -0x1p+2 * 0x0.0000000000002p-1022 = -0x0.0000000000008p-1022
+ comp64(double_of_bits(0x00000000, 0x00000002) * double_of_bits(0xc0100000, 0x00000000), 0x80000000, 0x00000008, STR(__LINE__)); // 0x0.0000000000002p-1022 * -0x1p+2 = -0x0.0000000000008p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc0140000, 0x00000000), 0x00000000, 0x00000005, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1.4p+2 = 0x0.0000000000005p-1022
+ comp64(double_of_bits(0xc0140000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00000000, 0x00000005, STR(__LINE__)); // -0x1.4p+2 * -0x0.0000000000001p-1022 = 0x0.0000000000005p-1022
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00000000, 0x00000005, STR(__LINE__)); // 0x1.4p+2 * 0x0.0000000000001p-1022 = 0x0.0000000000005p-1022
+}
+
+void f276(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x40140000, 0x00000000), 0x00000000, 0x00000005, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1.4p+2 = 0x0.0000000000005p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x40140000, 0x00000000), 0x80000000, 0x00000005, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1.4p+2 = -0x0.0000000000005p-1022
+ comp64(double_of_bits(0x40140000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80000000, 0x00000005, STR(__LINE__)); // 0x1.4p+2 * -0x0.0000000000001p-1022 = -0x0.0000000000005p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc0140000, 0x00000000), 0x80000000, 0x00000005, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1.4p+2 = -0x0.0000000000005p-1022
+ comp64(double_of_bits(0xc0140000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80000000, 0x00000005, STR(__LINE__)); // -0x1.4p+2 * 0x0.0000000000001p-1022 = -0x0.0000000000005p-1022
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x43500000, 0x00000000), 0x00300000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p+54 = 0x1p-1020
+ comp64(double_of_bits(0x43500000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00300000, 0x00000000, STR(__LINE__)); // 0x1p+54 * 0x0.0000000000001p-1022 = 0x1p-1020
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc3500000, 0x00000000), 0x00300000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p+54 = 0x1p-1020
+ comp64(double_of_bits(0xc3500000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00300000, 0x00000000, STR(__LINE__)); // -0x1p+54 * -0x0.0000000000001p-1022 = 0x1p-1020
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x43500000, 0x00000000), 0x80300000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p+54 = -0x1p-1020
+}
+
+void f277(void) {
+ comp64(double_of_bits(0x43500000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80300000, 0x00000000, STR(__LINE__)); // 0x1p+54 * -0x0.0000000000001p-1022 = -0x1p-1020
+ comp64(double_of_bits(0xc3500000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80300000, 0x00000000, STR(__LINE__)); // -0x1p+54 * 0x0.0000000000001p-1022 = -0x1p-1020
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc3500000, 0x00000000), 0x80300000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p+54 = -0x1p-1020
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0x43300000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * 0x1p+52 = 0x1p-1022
+ comp64(double_of_bits(0x43300000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // 0x1p+52 * 0x0.0000000000001p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0xc3300000, 0x00000000), 0x00100000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * -0x1p+52 = 0x1p-1022
+ comp64(double_of_bits(0xc3300000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x00100000, 0x00000000, STR(__LINE__)); // -0x1p+52 * -0x0.0000000000001p-1022 = 0x1p-1022
+ comp64(double_of_bits(0x80000000, 0x00000001) * double_of_bits(0x43300000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // -0x0.0000000000001p-1022 * 0x1p+52 = -0x1p-1022
+ comp64(double_of_bits(0x43300000, 0x00000000) * double_of_bits(0x80000000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // 0x1p+52 * -0x0.0000000000001p-1022 = -0x1p-1022
+ comp64(double_of_bits(0xc3300000, 0x00000000) * double_of_bits(0x00000000, 0x00000001), 0x80100000, 0x00000000, STR(__LINE__)); // -0x1p+52 * 0x0.0000000000001p-1022 = -0x1p-1022
+}
+
+void f278(void) {
+ comp64(double_of_bits(0x00000000, 0x00000001) * double_of_bits(0xc3300000, 0x00000000), 0x80100000, 0x00000000, STR(__LINE__)); // 0x0.0000000000001p-1022 * -0x1p+52 = -0x1p-1022
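+ // The tests below switch to conversions, starting with signed 32-bit int -> single precision (round to nearest even).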
+ comp32((signed)0x00000001, 0x3f800000, STR(__LINE__)); // (signed)0x00000001 = 0x1p+0
+ comp32((signed)0x00000000, 0x00000000, STR(__LINE__)); // (signed)0x00000000 = 0x0p+0
+ comp32((signed)0x00000002, 0x40000000, STR(__LINE__)); // (signed)0x00000002 = 0x1p+1
+ comp32((signed)0x00000003, 0x40400000, STR(__LINE__)); // (signed)0x00000003 = 0x1.8p+1
+ comp32((signed)0x00000010, 0x41800000, STR(__LINE__)); // (signed)0x00000010 = 0x1p+4
+ comp32((signed)0x00000100, 0x43800000, STR(__LINE__)); // (signed)0x00000100 = 0x1p+8
+ comp32((signed)0x00010001, 0x47800080, STR(__LINE__)); // (signed)0x00010001 = 0x1.0001p+16
+ comp32((signed)0x0000ffff, 0x477fff00, STR(__LINE__)); // (signed)0x0000ffff = 0x1.fffep+15
+ comp32((signed)0x00ffffff, 0x4b7fffff, STR(__LINE__)); // (signed)0x00ffffff = 0x1.fffffep+23
+}
+
+void f279(void) {
+ comp32((signed)0xffffffff, 0xbf800000, STR(__LINE__)); // (signed)0xffffffff = -0x1p+0
+ comp32((signed)0xfffffff0, 0xc1800000, STR(__LINE__)); // (signed)0xfffffff0 = -0x1p+4
+ comp32((signed)0x80000000, 0xcf000000, STR(__LINE__)); // (signed)0x80000000 = -0x1p+31
+ comp32((signed)0x01000001, 0x4b800000, STR(__LINE__)); // (signed)0x01000001 = 0x1p+24
+ comp32((signed)0x02000003, 0x4c000001, STR(__LINE__)); // (signed)0x02000003 = 0x1.000002p+25
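+ // Unsigned 32-bit int -> single precision; unlike the signed case, 0x80000000 converts to +0x1p+31.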
+ comp32((unsigned)0x00000001, 0x3f800000, STR(__LINE__)); // (unsigned)0x00000001 = 0x1p+0
+ comp32((unsigned)0x00000000, 0x00000000, STR(__LINE__)); // (unsigned)0x00000000 = 0x0p+0
+ comp32((unsigned)0x00000002, 0x40000000, STR(__LINE__)); // (unsigned)0x00000002 = 0x1p+1
+ comp32((unsigned)0x00000003, 0x40400000, STR(__LINE__)); // (unsigned)0x00000003 = 0x1.8p+1
+ comp32((unsigned)0x00000010, 0x41800000, STR(__LINE__)); // (unsigned)0x00000010 = 0x1p+4
+}
+
+void f280(void) {
+ comp32((unsigned)0x00000100, 0x43800000, STR(__LINE__)); // (unsigned)0x00000100 = 0x1p+8
+ comp32((unsigned)0x00010001, 0x47800080, STR(__LINE__)); // (unsigned)0x00010001 = 0x1.0001p+16
+ comp32((unsigned)0x0000ffff, 0x477fff00, STR(__LINE__)); // (unsigned)0x0000ffff = 0x1.fffep+15
+ comp32((unsigned)0x00ffffff, 0x4b7fffff, STR(__LINE__)); // (unsigned)0x00ffffff = 0x1.fffffep+23
+ comp32((unsigned)0x80000000, 0x4f000000, STR(__LINE__)); // (unsigned)0x80000000 = 0x1p+31
+ comp32((unsigned)0x01000001, 0x4b800000, STR(__LINE__)); // (unsigned)0x01000001 = 0x1p+24
+ comp32((unsigned)0x02000003, 0x4c000001, STR(__LINE__)); // (unsigned)0x02000003 = 0x1.000002p+25
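+ // Single precision -> signed 32-bit int; the fractional part is discarded (truncation toward zero).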
+ compi((signed)single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // (signed)0x0p+0 = 00000000
+ compi((signed)single_of_bits(0x3f800000), 0x00000001, STR(__LINE__)); // (signed)0x1p+0 = 00000001
+ compi((signed)single_of_bits(0x40000000), 0x00000002, STR(__LINE__)); // (signed)0x1p+1 = 00000002
+}
+
+void f281(void) {
+ compi((signed)single_of_bits(0x40400000), 0x00000003, STR(__LINE__)); // (signed)0x1.8p+1 = 00000003
+ compi((signed)single_of_bits(0x41800000), 0x00000010, STR(__LINE__)); // (signed)0x1p+4 = 00000010
+ compi((signed)single_of_bits(0x43800000), 0x00000100, STR(__LINE__)); // (signed)0x1p+8 = 00000100
+ compi((signed)single_of_bits(0x47800080), 0x00010001, STR(__LINE__)); // (signed)0x1.0001p+16 = 00010001
+ compi((signed)single_of_bits(0x477fff00), 0x0000ffff, STR(__LINE__)); // (signed)0x1.fffep+15 = 0000ffff
+ compi((signed)single_of_bits(0xbf800000), 0xffffffff, STR(__LINE__)); // (signed)-0x1p+0 = ffffffff
+ compi((signed)single_of_bits(0xcf000000), 0x80000000, STR(__LINE__)); // (signed)-0x1p+31 = 80000000
+ compi((signed)single_of_bits(0x4effffff), 0x7fffff80, STR(__LINE__)); // (signed)0x1.fffffep+30 = 7fffff80
+ compi((signed)single_of_bits(0xcf000000), 0x80000000, STR(__LINE__)); // (signed)-0x1p+31 = 80000000
+ compi((signed)single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // (signed)0x1p-149 = 00000000
+}
+
+void f282(void) {
+ compi((signed)single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // (signed)-0x1p-149 = 00000000
+ compi((signed)single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // (signed)0x1p-126 = 00000000
+ compi((signed)single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // (signed)-0x1p-126 = 00000000
+ compi((signed)single_of_bits(0x41200001), 0x0000000a, STR(__LINE__)); // (signed)0x1.400002p+3 = 0000000a
+ compi((signed)single_of_bits(0xc1200001), 0xfffffff6, STR(__LINE__)); // (signed)-0x1.400002p+3 = fffffff6
+ compi((signed)single_of_bits(0x3f7fffff), 0x00000000, STR(__LINE__)); // (signed)0x1.fffffep-1 = 00000000
+ compi((signed)single_of_bits(0xbf7fffff), 0x00000000, STR(__LINE__)); // (signed)-0x1.fffffep-1 = 00000000
+ compi((signed)single_of_bits(0x3fffffff), 0x00000001, STR(__LINE__)); // (signed)0x1.fffffep+0 = 00000001
+ compi((signed)single_of_bits(0xbfffffff), 0xffffffff, STR(__LINE__)); // (signed)-0x1.fffffep+0 = ffffffff
+ compi((signed)single_of_bits(0x3fc00000), 0x00000001, STR(__LINE__)); // (signed)0x1.8p+0 = 00000001
+}
+
+void f283(void) {
+ compi((signed)single_of_bits(0xbfc00000), 0xffffffff, STR(__LINE__)); // (signed)-0x1.8p+0 = ffffffff
+ compi((signed)single_of_bits(0x40200000), 0x00000002, STR(__LINE__)); // (signed)0x1.4p+1 = 00000002
+ compi((signed)single_of_bits(0xc0200000), 0xfffffffe, STR(__LINE__)); // (signed)-0x1.4p+1 = fffffffe
+ compi((signed)single_of_bits(0x3fc00001), 0x00000001, STR(__LINE__)); // (signed)0x1.800002p+0 = 00000001
+ compi((signed)single_of_bits(0xbfc00001), 0xffffffff, STR(__LINE__)); // (signed)-0x1.800002p+0 = ffffffff
+ compi((signed)single_of_bits(0x3fbfffff), 0x00000001, STR(__LINE__)); // (signed)0x1.7ffffep+0 = 00000001
+ compi((signed)single_of_bits(0xbfbfffff), 0xffffffff, STR(__LINE__)); // (signed)-0x1.7ffffep+0 = ffffffff
+ compi((signed)single_of_bits(0x4b800001), 0x01000002, STR(__LINE__)); // (signed)0x1.000002p+24 = 01000002
+ compi((signed)single_of_bits(0x4b7fffff), 0x00ffffff, STR(__LINE__)); // (signed)0x1.fffffep+23 = 00ffffff
+ compi((signed)single_of_bits(0x4b000001), 0x00800001, STR(__LINE__)); // (signed)0x1.000002p+23 = 00800001
+}
+
+void f284(void) {
+ compi((signed)single_of_bits(0xcb000001), 0xff7fffff, STR(__LINE__)); // (signed)-0x1.000002p+23 = ff7fffff
+ compi((signed)single_of_bits(0x4a800001), 0x00400000, STR(__LINE__)); // (signed)0x1.000002p+22 = 00400000
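+ // Double -> single narrowing conversions: zeros, NaNs and infinities are preserved, out-of-range
+ // magnitudes overflow to +/-infinity, and tiny values round into the single-precision subnormal range.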
+ comp32(double_of_bits(0x00000000, 0x00000000), 0x00000000, STR(__LINE__)); // 0x0p+0 = 0x0p+0
+ comp32(double_of_bits(0x80000000, 0x00000000), 0x80000000, STR(__LINE__)); // -0x0p+0 = -0x0p+0
+ comp32(double_of_bits(0x7ff80000, 0x00000000), 0x7fc00000, STR(__LINE__)); // nan = nan
+ comp32(double_of_bits(0xfff80000, 0x00000000), 0xffc00000, STR(__LINE__)); // -nan = -nan
+ comp32(double_of_bits(0x7ff00000, 0x00000000), 0x7f800000, STR(__LINE__)); // inf = inf
+ comp32(double_of_bits(0xfff00000, 0x00000000), 0xff800000, STR(__LINE__)); // -inf = -inf
+ comp32(double_of_bits(0x47e00000, 0x10000000), 0x7f000000, STR(__LINE__)); // 0x1.000001p+127 = 0x1p+127
+ comp32(double_of_bits(0x47f00000, 0x00000000), 0x7f800000, STR(__LINE__)); // 0x1p+128 = inf
+}
+
+void f285(void) {
+ comp32(double_of_bits(0xc7f00000, 0x00000000), 0xff800000, STR(__LINE__)); // -0x1p+128 = -inf
+ comp32(double_of_bits(0x47efffff, 0xffffffff), 0x7f800000, STR(__LINE__)); // 0x1.fffffffffffffp+127 = inf
+ comp32(double_of_bits(0xc7efffff, 0xffffffff), 0xff800000, STR(__LINE__)); // -0x1.fffffffffffffp+127 = -inf
+ comp32(double_of_bits(0x7fe00000, 0x00000000), 0x7f800000, STR(__LINE__)); // 0x1p+1023 = inf
+ comp32(double_of_bits(0xffe00000, 0x00000000), 0xff800000, STR(__LINE__)); // -0x1p+1023 = -inf
+ comp32(double_of_bits(0x47e00000, 0x00000000), 0x7f000000, STR(__LINE__)); // 0x1p+127 = 0x1p+127
+ comp32(double_of_bits(0xc7e00000, 0x00000000), 0xff000000, STR(__LINE__)); // -0x1p+127 = -0x1p+127
+ comp32(double_of_bits(0x38000000, 0x00000000), 0x00400000, STR(__LINE__)); // 0x1p-127 = 0x1p-127
+ comp32(double_of_bits(0xb8000000, 0x00000000), 0x80400000, STR(__LINE__)); // -0x1p-127 = -0x1p-127
+ comp32(double_of_bits(0x38100000, 0x00000000), 0x00800000, STR(__LINE__)); // 0x1p-126 = 0x1p-126
+}
+
+void f286(void) {
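+ // Around and below the single subnormal range: results round to the nearest single subnormal, or to a signed zero.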
+ comp32(double_of_bits(0xb8100000, 0x00000000), 0x80800000, STR(__LINE__)); // -0x1p-126 = -0x1p-126
+ comp32(double_of_bits(0x36a00000, 0x00000000), 0x00000001, STR(__LINE__)); // 0x1p-149 = 0x1p-149
+ comp32(double_of_bits(0xb6a00000, 0x00000000), 0x80000001, STR(__LINE__)); // -0x1p-149 = -0x1p-149
+ comp32(double_of_bits(0x380fffff, 0xffffffff), 0x00800000, STR(__LINE__)); // 0x1.fffffffffffffp-127 = 0x1p-126
+ comp32(double_of_bits(0xb80fffff, 0xffffffff), 0x80800000, STR(__LINE__)); // -0x1.fffffffffffffp-127 = -0x1p-126
+ comp32(double_of_bits(0x38000000, 0x38000000), 0x00400001, STR(__LINE__)); // 0x1.0000038p-127 = 0x1.000004p-127
+ comp32(double_of_bits(0xb8000000, 0x38000000), 0x80400001, STR(__LINE__)); // -0x1.0000038p-127 = -0x1.000004p-127
+ comp32(double_of_bits(0x36980000, 0x00000000), 0x00000001, STR(__LINE__)); // 0x1.8p-150 = 0x1p-149
+ comp32(double_of_bits(0xb6980000, 0x00000000), 0x80000001, STR(__LINE__)); // -0x1.8p-150 = -0x1p-149
+ comp32(double_of_bits(0x369c0000, 0x00000000), 0x00000001, STR(__LINE__)); // 0x1.cp-150 = 0x1p-149
+}
+
+void f287(void) {
+ comp32(double_of_bits(0xb69c0000, 0x00000000), 0x80000001, STR(__LINE__)); // -0x1.cp-150 = -0x1p-149
+ comp32(double_of_bits(0x37f00000, 0x60000000), 0x00200001, STR(__LINE__)); // 0x1.000006p-128 = 0x1.000008p-128
+ comp32(double_of_bits(0xb7f00000, 0x60000000), 0x80200001, STR(__LINE__)); // -0x1.000006p-128 = -0x1.000008p-128
+ comp32(double_of_bits(0x36a80000, 0x00000000), 0x00000002, STR(__LINE__)); // 0x1.8p-149 = 0x1p-148
+ comp32(double_of_bits(0xb6a80000, 0x00000000), 0x80000002, STR(__LINE__)); // -0x1.8p-149 = -0x1p-148
+ comp32(double_of_bits(0x38000000, 0x60000000), 0x00400002, STR(__LINE__)); // 0x1.000006p-127 = 0x1.000008p-127
+ comp32(double_of_bits(0xb8000000, 0x60000000), 0x80400002, STR(__LINE__)); // -0x1.000006p-127 = -0x1.000008p-127
+ comp32(double_of_bits(0x36800000, 0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-151 = 0x0p+0
+ comp32(double_of_bits(0xb6800000, 0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-151 = -0x0p+0
+ comp32(double_of_bits(0x38000000, 0x20000000), 0x00400000, STR(__LINE__)); // 0x1.000002p-127 = 0x1p-127
+}
+
+void f288(void) {
+ comp32(double_of_bits(0xb8000000, 0x20000000), 0x80400000, STR(__LINE__)); // -0x1.000002p-127 = -0x1p-127
+ comp32(double_of_bits(0x36700000, 0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-152 = 0x0p+0
+ comp32(double_of_bits(0xb6700000, 0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-152 = -0x0p+0
+ comp32(double_of_bits(0x37e00000, 0x60000000), 0x00100000, STR(__LINE__)); // 0x1.000006p-129 = 0x1p-129
+ comp32(double_of_bits(0xb7e00000, 0x60000000), 0x80100000, STR(__LINE__)); // -0x1.000006p-129 = -0x1p-129
+ comp32(double_of_bits(0x37f00000, 0x20000000), 0x00200000, STR(__LINE__)); // 0x1.000002p-128 = 0x1p-128
+ comp32(double_of_bits(0xb7f00000, 0x20000000), 0x80200000, STR(__LINE__)); // -0x1.000002p-128 = -0x1p-128
+ comp32(double_of_bits(0x37e00000, 0x20000000), 0x00100000, STR(__LINE__)); // 0x1.000002p-129 = 0x1p-129
+ comp32(double_of_bits(0xb7e00000, 0x20000000), 0x80100000, STR(__LINE__)); // -0x1.000002p-129 = -0x1p-129
+ comp32(double_of_bits(0x36880000, 0x00000000), 0x00000000, STR(__LINE__)); // 0x1.8p-151 = 0x0p+0
+}
+
+void f289(void) {
+ comp32(double_of_bits(0xb6880000, 0x00000000), 0x80000000, STR(__LINE__)); // -0x1.8p-151 = -0x0p+0
+ comp32(double_of_bits(0x36700000, 0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-152 = 0x0p+0
+ comp32(double_of_bits(0xb6700000, 0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-152 = -0x0p+0
+ comp32(double_of_bits(0x3ff00000, 0x00000000), 0x3f800000, STR(__LINE__)); // 0x1p+0 = 0x1p+0
+ comp32(double_of_bits(0x40000000, 0x00000000), 0x40000000, STR(__LINE__)); // 0x1p+1 = 0x1p+1
+ comp32(double_of_bits(0x40080000, 0x00000000), 0x40400000, STR(__LINE__)); // 0x1.8p+1 = 0x1.8p+1
+ comp32(double_of_bits(0x40300000, 0x00000000), 0x41800000, STR(__LINE__)); // 0x1p+4 = 0x1p+4
+ comp32(double_of_bits(0x40310000, 0x00000000), 0x41880000, STR(__LINE__)); // 0x1.1p+4 = 0x1.1p+4
+ comp32(double_of_bits(0x40700000, 0x00000000), 0x43800000, STR(__LINE__)); // 0x1p+8 = 0x1p+8
+ comp32(double_of_bits(0x40f00010, 0x00000000), 0x47800080, STR(__LINE__)); // 0x1.0001p+16 = 0x1.0001p+16
+}
+
+void f290(void) {
+ comp32(double_of_bits(0x40efffe0, 0x00000000), 0x477fff00, STR(__LINE__)); // 0x1.fffep+15 = 0x1.fffep+15
+ comp32(double_of_bits(0xbff00000, 0x00000000), 0xbf800000, STR(__LINE__)); // -0x1p+0 = -0x1p+0
+ comp32(double_of_bits(0xc1e00000, 0x00000000), 0xcf000000, STR(__LINE__)); // -0x1p+31 = -0x1p+31
+ comp32(double_of_bits(0xc3e00000, 0x00000000), 0xdf000000, STR(__LINE__)); // -0x1p+63 = -0x1p+63
+ comp32(double_of_bits(0x3ff00000, 0x18000000), 0x3f800001, STR(__LINE__)); // 0x1.0000018p+0 = 0x1.000002p+0
+ comp32(double_of_bits(0x3ff00000, 0x10000001), 0x3f800001, STR(__LINE__)); // 0x1.0000010000001p+0 = 0x1.000002p+0
+ comp32(double_of_bits(0xbfefffff, 0xffffffff), 0xbf800000, STR(__LINE__)); // -0x1.fffffffffffffp-1 = -0x1p+0
+ comp32(double_of_bits(0x3ff00000, 0x30000000), 0x3f800002, STR(__LINE__)); // 0x1.000003p+0 = 0x1.000004p+0
+ comp32(double_of_bits(0x3ff00000, 0x10000000), 0x3f800000, STR(__LINE__)); // 0x1.000001p+0 = 0x1p+0
+ comp32(double_of_bits(0x3ff00000, 0x00000001), 0x3f800000, STR(__LINE__)); // 0x1.0000000000001p+0 = 0x1p+0
+}
+
+void f291(void) {
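+ // Single precision -> unsigned 32-bit int, again truncating toward zero; 0x1.fffffep+31 converts to 0xffffff00.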
+ compi((unsigned)single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // (unsigned)0x0p+0 = 00000000
+ compi((unsigned)single_of_bits(0x3f800000), 0x00000001, STR(__LINE__)); // (unsigned)0x1p+0 = 00000001
+ compi((unsigned)single_of_bits(0x40000000), 0x00000002, STR(__LINE__)); // (unsigned)0x1p+1 = 00000002
+ compi((unsigned)single_of_bits(0x40400000), 0x00000003, STR(__LINE__)); // (unsigned)0x1.8p+1 = 00000003
+ compi((unsigned)single_of_bits(0x41800000), 0x00000010, STR(__LINE__)); // (unsigned)0x1p+4 = 00000010
+ compi((unsigned)single_of_bits(0x43800000), 0x00000100, STR(__LINE__)); // (unsigned)0x1p+8 = 00000100
+ compi((unsigned)single_of_bits(0x47800080), 0x00010001, STR(__LINE__)); // (unsigned)0x1.0001p+16 = 00010001
+ compi((unsigned)single_of_bits(0x477fff00), 0x0000ffff, STR(__LINE__)); // (unsigned)0x1.fffep+15 = 0000ffff
+ compi((unsigned)single_of_bits(0x4f7fffff), 0xffffff00, STR(__LINE__)); // (unsigned)0x1.fffffep+31 = ffffff00
+ compi((unsigned)single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // (unsigned)0x1p-149 = 00000000
+}
+
+void f292(void) {
+ compi((unsigned)single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // (unsigned)0x1p-126 = 00000000
+ compi((unsigned)single_of_bits(0x41200001), 0x0000000a, STR(__LINE__)); // (unsigned)0x1.400002p+3 = 0000000a
+ compi((unsigned)single_of_bits(0x3f7fffff), 0x00000000, STR(__LINE__)); // (unsigned)0x1.fffffep-1 = 00000000
+ compi((unsigned)single_of_bits(0x3fffffff), 0x00000001, STR(__LINE__)); // (unsigned)0x1.fffffep+0 = 00000001
+ compi((unsigned)single_of_bits(0x3fc00000), 0x00000001, STR(__LINE__)); // (unsigned)0x1.8p+0 = 00000001
+ compi((unsigned)single_of_bits(0x40200000), 0x00000002, STR(__LINE__)); // (unsigned)0x1.4p+1 = 00000002
+ compi((unsigned)single_of_bits(0x3fc00001), 0x00000001, STR(__LINE__)); // (unsigned)0x1.800002p+0 = 00000001
+ compi((unsigned)single_of_bits(0x3fbfffff), 0x00000001, STR(__LINE__)); // (unsigned)0x1.7ffffep+0 = 00000001
+ compi((unsigned)single_of_bits(0x4b800001), 0x01000002, STR(__LINE__)); // (unsigned)0x1.000002p+24 = 01000002
+ compi((unsigned)single_of_bits(0x4b7fffff), 0x00ffffff, STR(__LINE__)); // (unsigned)0x1.fffffep+23 = 00ffffff
+}
+
+void f293(void) {
+ compi((unsigned)single_of_bits(0x4b000001), 0x00800001, STR(__LINE__)); // (unsigned)0x1.000002p+23 = 00800001
+ compi((unsigned)single_of_bits(0x4a800001), 0x00400000, STR(__LINE__)); // (unsigned)0x1.000002p+22 = 00400000
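+ // Single-precision addition: exact sums, signed zeros, infinities, NaN propagation, overflow and rounding cases follow.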
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x3f800000), 0x40000000, STR(__LINE__)); // 0x1p+0 + 0x1p+0 = 0x1p+1
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xbf800000), 0xc0000000, STR(__LINE__)); // -0x1p+0 + -0x1p+0 = -0x1p+1
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x40000000), 0x40400000, STR(__LINE__)); // 0x1p+0 + 0x1p+1 = 0x1.8p+1
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x3f800000), 0x40400000, STR(__LINE__)); // 0x1p+1 + 0x1p+0 = 0x1.8p+1
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xc0000000), 0xc0400000, STR(__LINE__)); // -0x1p+0 + -0x1p+1 = -0x1.8p+1
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xbf800000), 0xc0400000, STR(__LINE__)); // -0x1p+1 + -0x1p+0 = -0x1.8p+1
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x40000000), 0x40800000, STR(__LINE__)); // 0x1p+1 + 0x1p+1 = 0x1p+2
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x40e00000), 0x41000000, STR(__LINE__)); // 0x1p+0 + 0x1.cp+2 = 0x1p+3
+}
+
+void f294(void) {
+ comp32(single_of_bits(0x40e00000) + single_of_bits(0x3f800000), 0x41000000, STR(__LINE__)); // 0x1.cp+2 + 0x1p+0 = 0x1p+3
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xc0e00000), 0xc1000000, STR(__LINE__)); // -0x1p+0 + -0x1.cp+2 = -0x1p+3
+ comp32(single_of_bits(0xc0e00000) + single_of_bits(0xbf800000), 0xc1000000, STR(__LINE__)); // -0x1.cp+2 + -0x1p+0 = -0x1p+3
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0xbf800000), 0x40800000, STR(__LINE__)); // 0x1.4p+2 + -0x1p+0 = 0x1p+2
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x40a00000), 0x40800000, STR(__LINE__)); // -0x1p+0 + 0x1.4p+2 = 0x1p+2
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0x3f800000), 0xc0800000, STR(__LINE__)); // -0x1.4p+2 + 0x1p+0 = -0x1p+2
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xc0a00000), 0xc0800000, STR(__LINE__)); // 0x1p+0 + -0x1.4p+2 = -0x1p+2
+ comp32(single_of_bits(0x40000000) + single_of_bits(0xc0a00000), 0xc0400000, STR(__LINE__)); // 0x1p+1 + -0x1.4p+2 = -0x1.8p+1
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0x40000000), 0xc0400000, STR(__LINE__)); // -0x1.4p+2 + 0x1p+1 = -0x1.8p+1
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0x40a00000), 0x40400000, STR(__LINE__)); // -0x1p+1 + 0x1.4p+2 = 0x1.8p+1
+}
+
+void f295(void) {
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0xc0000000), 0x40400000, STR(__LINE__)); // 0x1.4p+2 + -0x1p+1 = 0x1.8p+1
+ comp32(single_of_bits(0x40000000) + single_of_bits(0xc0800000), 0xc0000000, STR(__LINE__)); // 0x1p+1 + -0x1p+2 = -0x1p+1
+ comp32(single_of_bits(0xc0800000) + single_of_bits(0x40000000), 0xc0000000, STR(__LINE__)); // -0x1p+2 + 0x1p+1 = -0x1p+1
+ comp32(single_of_bits(0x40800000) + single_of_bits(0xc0000000), 0x40000000, STR(__LINE__)); // 0x1p+2 + -0x1p+1 = 0x1p+1
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0x40800000), 0x40000000, STR(__LINE__)); // -0x1p+1 + 0x1p+2 = 0x1p+1
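+ // Signed zeros under round-to-nearest: (+0) + (-0) and (-0) + (+0) give +0; only (-0) + (-0) gives -0.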
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // 0x0p+0 + -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // -0x0p+0 + 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x0p+0 + 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // -0x0p+0 + -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x007fffff), 0x007fffff, STR(__LINE__)); // 0x0p+0 + 0x1.fffffcp-127 = 0x1.fffffcp-127
+}
+
+void f296(void) {
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x00000000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-127 + 0x0p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x007fffff), 0x007fffff, STR(__LINE__)); // -0x0p+0 + 0x1.fffffcp-127 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x80000000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-127 + -0x0p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x807fffff), 0x807fffff, STR(__LINE__)); // 0x0p+0 + -0x1.fffffcp-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x00000000), 0x807fffff, STR(__LINE__)); // -0x1.fffffcp-127 + 0x0p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x807fffff), 0x807fffff, STR(__LINE__)); // -0x0p+0 + -0x1.fffffcp-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x80000000), 0x807fffff, STR(__LINE__)); // -0x1.fffffcp-127 + -0x0p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x00000000), 0x00000003, STR(__LINE__)); // 0x1.8p-148 + 0x0p+0 = 0x1.8p-148
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x00000003), 0x00000003, STR(__LINE__)); // 0x0p+0 + 0x1.8p-148 = 0x1.8p-148
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x80000000), 0x00000003, STR(__LINE__)); // 0x1.8p-148 + -0x0p+0 = 0x1.8p-148
+}
+
+void f297(void) {
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x00000003), 0x00000003, STR(__LINE__)); // -0x0p+0 + 0x1.8p-148 = 0x1.8p-148
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x00000000), 0x80000003, STR(__LINE__)); // -0x1.8p-148 + 0x0p+0 = -0x1.8p-148
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x80000003), 0x80000003, STR(__LINE__)); // 0x0p+0 + -0x1.8p-148 = -0x1.8p-148
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x80000000), 0x80000003, STR(__LINE__)); // -0x1.8p-148 + -0x0p+0 = -0x1.8p-148
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x80000003), 0x80000003, STR(__LINE__)); // -0x0p+0 + -0x1.8p-148 = -0x1.8p-148
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x80800000), 0x80800000, STR(__LINE__)); // -0x0p+0 + -0x1p-126 = -0x1p-126
+ comp32(single_of_bits(0x80800000) + single_of_bits(0x80000000), 0x80800000, STR(__LINE__)); // -0x1p-126 + -0x0p+0 = -0x1p-126
+ comp32(single_of_bits(0x00800000) + single_of_bits(0x00000000), 0x00800000, STR(__LINE__)); // 0x1p-126 + 0x0p+0 = 0x1p-126
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x00800000), 0x00800000, STR(__LINE__)); // 0x0p+0 + 0x1p-126 = 0x1p-126
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x80800000), 0x80800000, STR(__LINE__)); // 0x0p+0 + -0x1p-126 = -0x1p-126
+}
+
+void f298(void) {
+ comp32(single_of_bits(0x80800000) + single_of_bits(0x00000000), 0x80800000, STR(__LINE__)); // -0x1p-126 + 0x0p+0 = -0x1p-126
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x7f000000), 0x7f000000, STR(__LINE__)); // 0x0p+0 + 0x1p+127 = 0x1p+127
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x00000000), 0x7f000000, STR(__LINE__)); // 0x1p+127 + 0x0p+0 = 0x1p+127
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x7f000000), 0x7f000000, STR(__LINE__)); // -0x0p+0 + 0x1p+127 = 0x1p+127
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x80000000), 0x7f000000, STR(__LINE__)); // 0x1p+127 + -0x0p+0 = 0x1p+127
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x00000000), 0xff000000, STR(__LINE__)); // -0x1p+127 + 0x0p+0 = -0x1p+127
+ comp32(single_of_bits(0x00000000) + single_of_bits(0xff000000), 0xff000000, STR(__LINE__)); // 0x0p+0 + -0x1p+127 = -0x1p+127
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x80000000), 0xff000000, STR(__LINE__)); // -0x1p+127 + -0x0p+0 = -0x1p+127
+ comp32(single_of_bits(0x80000000) + single_of_bits(0xff000000), 0xff000000, STR(__LINE__)); // -0x0p+0 + -0x1p+127 = -0x1p+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x80000000), 0x3f800000, STR(__LINE__)); // 0x1p+0 + -0x0p+0 = 0x1p+0
+}
+
+void f299(void) {
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x3f800000), 0x3f800000, STR(__LINE__)); // -0x0p+0 + 0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x80000000), 0xbf800000, STR(__LINE__)); // -0x1p+0 + -0x0p+0 = -0x1p+0
+ comp32(single_of_bits(0x80000000) + single_of_bits(0xbf800000), 0xbf800000, STR(__LINE__)); // -0x0p+0 + -0x1p+0 = -0x1p+0
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x3f800000), 0x3f800000, STR(__LINE__)); // 0x0p+0 + 0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x00000000), 0x3f800000, STR(__LINE__)); // 0x1p+0 + 0x0p+0 = 0x1p+0
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0x80000000), 0x40a00000, STR(__LINE__)); // 0x1.4p+2 + -0x0p+0 = 0x1.4p+2
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x40a00000), 0x40a00000, STR(__LINE__)); // -0x0p+0 + 0x1.4p+2 = 0x1.4p+2
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0x00000000), 0x40a00000, STR(__LINE__)); // 0x1.4p+2 + 0x0p+0 = 0x1.4p+2
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x40a00000), 0x40a00000, STR(__LINE__)); // 0x0p+0 + 0x1.4p+2 = 0x1.4p+2
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // inf + 0x0p+0 = inf
+}
+
+void f300(void) {
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x0p+0 + inf = inf
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // inf + -0x0p+0 = inf
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // -0x0p+0 + inf = inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -inf + 0x0p+0 = -inf
+ comp32(single_of_bits(0x00000000) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x0p+0 + -inf = -inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // -inf + -0x0p+0 = -inf
+ comp32(single_of_bits(0x80000000) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // -0x0p+0 + -inf = -inf
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x00000000), 0x7fc00000, STR(__LINE__)); // nan + 0x0p+0 = nan
+ comp32(single_of_bits(0x00000000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x0p+0 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x80000000), 0x7fc00000, STR(__LINE__)); // nan + -0x0p+0 = nan
+}
+
+void f301(void) {
+ comp32(single_of_bits(0x80000000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x0p+0 + nan = nan
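+ // Infinities: like-signed infinities add to the same infinity; opposite-signed infinities produce a NaN.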
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // inf + inf = inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // -inf + -inf = -inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x7f800000), 0x7fc00000, STR(__LINE__)); // -inf + inf = nan
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0xff800000), 0x7fc00000, STR(__LINE__)); // inf + -inf = nan
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x007fffff), 0x7f800000, STR(__LINE__)); // inf + 0x1.fffffcp-127 = inf
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp-127 + inf = inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x007fffff), 0xff800000, STR(__LINE__)); // -inf + 0x1.fffffcp-127 = -inf
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1.fffffcp-127 + -inf = -inf
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x807fffff), 0x7f800000, STR(__LINE__)); // inf + -0x1.fffffcp-127 = inf
+}
+
+void f302(void) {
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // -0x1.fffffcp-127 + inf = inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x807fffff), 0xff800000, STR(__LINE__)); // -inf + -0x1.fffffcp-127 = -inf
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // -0x1.fffffcp-127 + -inf = -inf
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.8p-148 + inf = inf
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x00000003), 0x7f800000, STR(__LINE__)); // inf + 0x1.8p-148 = inf
+ comp32(single_of_bits(0x00000003) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1.8p-148 + -inf = -inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x00000003), 0xff800000, STR(__LINE__)); // -inf + 0x1.8p-148 = -inf
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // -0x1.8p-148 + inf = inf
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x80000003), 0x7f800000, STR(__LINE__)); // inf + -0x1.8p-148 = inf
+ comp32(single_of_bits(0x80000003) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // -0x1.8p-148 + -inf = -inf
+}
+
+void f303(void) {
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x80000003), 0xff800000, STR(__LINE__)); // -inf + -0x1.8p-148 = -inf
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // inf + 0x1p+127 = inf
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p+127 + inf = inf
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0xff000000), 0x7f800000, STR(__LINE__)); // inf + -0x1p+127 = inf
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // -0x1p+127 + inf = inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -inf + 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1p+127 + -inf = -inf
+ comp32(single_of_bits(0xff800000) + single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // -inf + -0x1p+127 = -inf
+ comp32(single_of_bits(0xff000000) + single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // -0x1p+127 + -inf = -inf
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x7f800000), 0x7fc00000, STR(__LINE__)); // nan + inf = nan
+}
+
+void f304(void) {
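+ // Any addition with a NaN operand returns the quiet NaN pattern 0x7fc00000, whatever the other operand is.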
+ comp32(single_of_bits(0x7f800000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // inf + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0xff800000), 0x7fc00000, STR(__LINE__)); // nan + -inf = nan
+ comp32(single_of_bits(0xff800000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -inf + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // nan + nan = nan
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1.fffffcp-127 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x007fffff), 0x7fc00000, STR(__LINE__)); // nan + 0x1.fffffcp-127 = nan
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1.fffffcp-127 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x807fffff), 0x7fc00000, STR(__LINE__)); // nan + -0x1.fffffcp-127 = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x00000001), 0x7fc00000, STR(__LINE__)); // nan + 0x1p-149 = nan
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1p-149 + nan = nan
+}
+
+void f305(void) {
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x80000001), 0x7fc00000, STR(__LINE__)); // nan + -0x1p-149 = nan
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1p-149 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x3f800000), 0x7fc00000, STR(__LINE__)); // nan + 0x1p+0 = nan
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1p+0 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0xbf800000), 0x7fc00000, STR(__LINE__)); // nan + -0x1p+0 = nan
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1p+0 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0x7f7fffff), 0x7fc00000, STR(__LINE__)); // nan + 0x1.fffffep+127 = nan
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1.fffffep+127 + nan = nan
+ comp32(single_of_bits(0x7fc00000) + single_of_bits(0xff7fffff), 0x7fc00000, STR(__LINE__)); // nan + -0x1.fffffep+127 = nan
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1.fffffep+127 + nan = nan
+}
+
+void f306(void) {
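+ // Sums whose rounded value would exceed the largest finite single (0x1.fffffep+127) overflow to infinity.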
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // 0x1p+127 + 0x1p+127 = inf
+ comp32(single_of_bits(0xff000000) + single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // -0x1p+127 + -0x1p+127 = -inf
+ comp32(single_of_bits(0x7f7ffffe) + single_of_bits(0x7f7ffffe), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp+127 + 0x1.fffffcp+127 = inf
+ comp32(single_of_bits(0xff7ffffe) + single_of_bits(0xff7ffffe), 0xff800000, STR(__LINE__)); // -0x1.fffffcp+127 + -0x1.fffffcp+127 = -inf
+ comp32(single_of_bits(0x7f7ffffe) + single_of_bits(0x7f7fffff), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp+127 + 0x1.fffffep+127 = inf
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x7f7ffffe), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 + 0x1.fffffcp+127 = inf
+ comp32(single_of_bits(0xff7ffffe) + single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // -0x1.fffffcp+127 + -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xff7ffffe), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 + -0x1.fffffcp+127 = -inf
+ comp32(single_of_bits(0x7f000001) + single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // 0x1.000002p+127 + 0x1p+127 = inf
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x7f000001), 0x7f800000, STR(__LINE__)); // 0x1p+127 + 0x1.000002p+127 = inf
+}
+
+void f307(void) {
+ comp32(single_of_bits(0xff000001) + single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // -0x1.000002p+127 + -0x1p+127 = -inf
+ comp32(single_of_bits(0xff000000) + single_of_bits(0xff000001), 0xff800000, STR(__LINE__)); // -0x1p+127 + -0x1.000002p+127 = -inf
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x74000000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 + 0x1p+105 = inf
+ comp32(single_of_bits(0x74000000) + single_of_bits(0x7f7fffff), 0x7f800000, STR(__LINE__)); // 0x1p+105 + 0x1.fffffep+127 = inf
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xf4000000), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 + -0x1p+105 = -inf
+ comp32(single_of_bits(0xf4000000) + single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // -0x1p+105 + -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x3f800000), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+127 + 0x1p+0 = 0x1.fffffep+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x7f7fffff), 0x7f7fffff, STR(__LINE__)); // 0x1p+0 + 0x1.fffffep+127 = 0x1.fffffep+127
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xbf800000), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+127 + -0x1p+0 = -0x1.fffffep+127
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xff7fffff), 0xff7fffff, STR(__LINE__)); // -0x1p+0 + -0x1.fffffep+127 = -0x1.fffffep+127
+}
+
+void f308(void) {
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x7f7fffff), 0x7f7fffff, STR(__LINE__)); // 0x1p-149 + 0x1.fffffep+127 = 0x1.fffffep+127
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x00000001), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+127 + 0x1p-149 = 0x1.fffffep+127
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xff7fffff), 0xff7fffff, STR(__LINE__)); // -0x1p-149 + -0x1.fffffep+127 = -0x1.fffffep+127
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0x80000001), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+127 + -0x1p-149 = -0x1.fffffep+127
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x72800000), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+127 + 0x1p+102 = 0x1.fffffep+127
+ comp32(single_of_bits(0x72800000) + single_of_bits(0x7f7fffff), 0x7f7fffff, STR(__LINE__)); // 0x1p+102 + 0x1.fffffep+127 = 0x1.fffffep+127
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xf2800000), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+127 + -0x1p+102 = -0x1.fffffep+127
+ comp32(single_of_bits(0xf2800000) + single_of_bits(0xff7fffff), 0xff7fffff, STR(__LINE__)); // -0x1p+102 + -0x1.fffffep+127 = -0x1.fffffep+127
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x72800001), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+127 + 0x1.000002p+102 = 0x1.fffffep+127
+ comp32(single_of_bits(0x72800001) + single_of_bits(0x7f7fffff), 0x7f7fffff, STR(__LINE__)); // 0x1.000002p+102 + 0x1.fffffep+127 = 0x1.fffffep+127
+}
+
+void f309(void) {
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xf2800001), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+127 + -0x1.000002p+102 = -0x1.fffffep+127
+ comp32(single_of_bits(0xf2800001) + single_of_bits(0xff7fffff), 0xff7fffff, STR(__LINE__)); // -0x1.000002p+102 + -0x1.fffffep+127 = -0x1.fffffep+127
+ comp32(single_of_bits(0x7effffff) + single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+126 + 0x1p+127 = inf
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x7effffff), 0x7f800000, STR(__LINE__)); // 0x1p+127 + 0x1.fffffep+126 = inf
+ comp32(single_of_bits(0xfeffffff) + single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // -0x1.fffffep+126 + -0x1p+127 = -inf
+ comp32(single_of_bits(0xff000000) + single_of_bits(0xfeffffff), 0xff800000, STR(__LINE__)); // -0x1p+127 + -0x1.fffffep+126 = -inf
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x73400000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 + 0x1.8p+103 = inf
+ comp32(single_of_bits(0x73400000) + single_of_bits(0x7f7fffff), 0x7f800000, STR(__LINE__)); // 0x1.8p+103 + 0x1.fffffep+127 = inf
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xf3400000), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 + -0x1.8p+103 = -inf
+ comp32(single_of_bits(0xf3400000) + single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // -0x1.8p+103 + -0x1.fffffep+127 = -inf
+}
+
+void f310(void) {
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x73000001), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 + 0x1.000002p+103 = inf
+ comp32(single_of_bits(0x73000001) + single_of_bits(0x7f7fffff), 0x7f800000, STR(__LINE__)); // 0x1.000002p+103 + 0x1.fffffep+127 = inf
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xf3000001), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 + -0x1.000002p+103 = -inf
+ comp32(single_of_bits(0xf3000001) + single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // -0x1.000002p+103 + -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x73400001), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 + 0x1.800002p+103 = inf
+ comp32(single_of_bits(0x73400001) + single_of_bits(0x7f7fffff), 0x7f800000, STR(__LINE__)); // 0x1.800002p+103 + 0x1.fffffep+127 = inf
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0xf3400001), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 + -0x1.800002p+103 = -inf
+ comp32(single_of_bits(0xf3400001) + single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // -0x1.800002p+103 + -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x3f800000), 0x7f000000, STR(__LINE__)); // 0x1p+127 + 0x1p+0 = 0x1p+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x7f000000), 0x7f000000, STR(__LINE__)); // 0x1p+0 + 0x1p+127 = 0x1p+127
+}
+
+void f311(void) {
+ comp32(single_of_bits(0xff000000) + single_of_bits(0xbf800000), 0xff000000, STR(__LINE__)); // -0x1p+127 + -0x1p+0 = -0x1p+127
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xff000000), 0xff000000, STR(__LINE__)); // -0x1p+0 + -0x1p+127 = -0x1p+127
+ comp32(single_of_bits(0x7effffff) + single_of_bits(0x3f800000), 0x7effffff, STR(__LINE__)); // 0x1.fffffep+126 + 0x1p+0 = 0x1.fffffep+126
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x7effffff), 0x7effffff, STR(__LINE__)); // 0x1p+0 + 0x1.fffffep+126 = 0x1.fffffep+126
+ comp32(single_of_bits(0x7f7ffffe) + single_of_bits(0x3f800000), 0x7f7ffffe, STR(__LINE__)); // 0x1.fffffcp+127 + 0x1p+0 = 0x1.fffffcp+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x7f7ffffe), 0x7f7ffffe, STR(__LINE__)); // 0x1p+0 + 0x1.fffffcp+127 = 0x1.fffffcp+127
+ comp32(single_of_bits(0xff7ffffe) + single_of_bits(0xbf800000), 0xff7ffffe, STR(__LINE__)); // -0x1.fffffcp+127 + -0x1p+0 = -0x1.fffffcp+127
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xff7ffffe), 0xff7ffffe, STR(__LINE__)); // -0x1p+0 + -0x1.fffffcp+127 = -0x1.fffffcp+127
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x7f000000), 0x7f000000, STR(__LINE__)); // 0x1p-149 + 0x1p+127 = 0x1p+127
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x00000001), 0x7f000000, STR(__LINE__)); // 0x1p+127 + 0x1p-149 = 0x1p+127
+}
+
+void f312(void) {
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xff000000), 0xff000000, STR(__LINE__)); // -0x1p-149 + -0x1p+127 = -0x1p+127
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x80000001), 0xff000000, STR(__LINE__)); // -0x1p+127 + -0x1p-149 = -0x1p+127
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x7effffff), 0x7effffff, STR(__LINE__)); // 0x1p-149 + 0x1.fffffep+126 = 0x1.fffffep+126
+ comp32(single_of_bits(0x7effffff) + single_of_bits(0x00000001), 0x7effffff, STR(__LINE__)); // 0x1.fffffep+126 + 0x1p-149 = 0x1.fffffep+126
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xfeffffff), 0xfeffffff, STR(__LINE__)); // -0x1p-149 + -0x1.fffffep+126 = -0x1.fffffep+126
+ comp32(single_of_bits(0xfeffffff) + single_of_bits(0x80000001), 0xfeffffff, STR(__LINE__)); // -0x1.fffffep+126 + -0x1p-149 = -0x1.fffffep+126
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x7f7ffffe), 0x7f7ffffe, STR(__LINE__)); // 0x1p-149 + 0x1.fffffcp+127 = 0x1.fffffcp+127
+ comp32(single_of_bits(0x7f7ffffe) + single_of_bits(0x00000001), 0x7f7ffffe, STR(__LINE__)); // 0x1.fffffcp+127 + 0x1p-149 = 0x1.fffffcp+127
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xff7ffffe), 0xff7ffffe, STR(__LINE__)); // -0x1p-149 + -0x1.fffffcp+127 = -0x1.fffffcp+127
+ comp32(single_of_bits(0xff7ffffe) + single_of_bits(0x80000001), 0xff7ffffe, STR(__LINE__)); // -0x1.fffffcp+127 + -0x1p-149 = -0x1.fffffcp+127
+}
+
+void f313(void) {
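+ // 0x1p+104 is exactly one ulp of 0x1p+127 in single precision, so 0x1p+127 + 0x1p+104 is the next representable value.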
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x73800000), 0x7f000001, STR(__LINE__)); // 0x1p+127 + 0x1p+104 = 0x1.000002p+127
+ comp32(single_of_bits(0x73800000) + single_of_bits(0x7f000000), 0x7f000001, STR(__LINE__)); // 0x1p+104 + 0x1p+127 = 0x1.000002p+127
+ comp32(single_of_bits(0xff000000) + single_of_bits(0xf3800000), 0xff000001, STR(__LINE__)); // -0x1p+127 + -0x1p+104 = -0x1.000002p+127
+ comp32(single_of_bits(0xf3800000) + single_of_bits(0xff000000), 0xff000001, STR(__LINE__)); // -0x1p+104 + -0x1p+127 = -0x1.000002p+127
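+ // Adding a value to itself doubles it exactly (barring overflow), as the remaining lines of this function show.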
+ comp32(single_of_bits(0x7efffffe) + single_of_bits(0x7efffffe), 0x7f7ffffe, STR(__LINE__)); // 0x1.fffffcp+126 + 0x1.fffffcp+126 = 0x1.fffffcp+127
+ comp32(single_of_bits(0xfefffffe) + single_of_bits(0xfefffffe), 0xff7ffffe, STR(__LINE__)); // -0x1.fffffcp+126 + -0x1.fffffcp+126 = -0x1.fffffcp+127
+ comp32(single_of_bits(0x40400000) + single_of_bits(0x40400000), 0x40c00000, STR(__LINE__)); // 0x1.8p+1 + 0x1.8p+1 = 0x1.8p+2
+ comp32(single_of_bits(0x00800000) + single_of_bits(0x00800000), 0x01000000, STR(__LINE__)); // 0x1p-126 + 0x1p-126 = 0x1p-125
+ comp32(single_of_bits(0x7e800000) + single_of_bits(0x7e800000), 0x7f000000, STR(__LINE__)); // 0x1p+126 + 0x1p+126 = 0x1p+127
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x007fffff), 0x00fffffe, STR(__LINE__)); // 0x1.fffffcp-127 + 0x1.fffffcp-127 = 0x1.fffffcp-126
+}
+
+void f314(void) {
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x807fffff), 0x80fffffe, STR(__LINE__)); // -0x1.fffffcp-127 + -0x1.fffffcp-127 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x00000004) + single_of_bits(0x00000004), 0x00000008, STR(__LINE__)); // 0x1p-147 + 0x1p-147 = 0x1p-146
+ comp32(single_of_bits(0x80000004) + single_of_bits(0x80000004), 0x80000008, STR(__LINE__)); // -0x1p-147 + -0x1p-147 = -0x1p-146
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x00000001), 0x00000002, STR(__LINE__)); // 0x1p-149 + 0x1p-149 = 0x1p-148
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x80000001), 0x80000002, STR(__LINE__)); // -0x1p-149 + -0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x41000000), 0x41100000, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+3 = 0x1.2p+3
+ comp32(single_of_bits(0x41000000) + single_of_bits(0x3f800001), 0x41100000, STR(__LINE__)); // 0x1p+3 + 0x1.000002p+0 = 0x1.2p+3
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0xc1000000), 0xc1100000, STR(__LINE__)); // -0x1.000002p+0 + -0x1p+3 = -0x1.2p+3
+ comp32(single_of_bits(0xc1000000) + single_of_bits(0xbf800001), 0xc1100000, STR(__LINE__)); // -0x1p+3 + -0x1.000002p+0 = -0x1.2p+3
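+ // When the smaller addend is below half an ulp of the larger, it is absorbed and the sum equals the larger operand.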
+ comp32(single_of_bits(0x4c800000) + single_of_bits(0x3f800000), 0x4c800000, STR(__LINE__)); // 0x1p+26 + 0x1p+0 = 0x1p+26
+}
+
+void f315(void) {
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x4c800000), 0x4c800000, STR(__LINE__)); // 0x1p+0 + 0x1p+26 = 0x1p+26
+ comp32(single_of_bits(0xcc800000) + single_of_bits(0xbf800000), 0xcc800000, STR(__LINE__)); // -0x1p+26 + -0x1p+0 = -0x1p+26
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xcc800000), 0xcc800000, STR(__LINE__)); // -0x1p+0 + -0x1p+26 = -0x1p+26
+ comp32(single_of_bits(0x02000000) + single_of_bits(0x00000001), 0x02000000, STR(__LINE__)); // 0x1p-123 + 0x1p-149 = 0x1p-123
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x02000000), 0x02000000, STR(__LINE__)); // 0x1p-149 + 0x1p-123 = 0x1p-123
+ comp32(single_of_bits(0x82000000) + single_of_bits(0x80000001), 0x82000000, STR(__LINE__)); // -0x1p-123 + -0x1p-149 = -0x1p-123
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x82000000), 0x82000000, STR(__LINE__)); // -0x1p-149 + -0x1p-123 = -0x1p-123
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x3f800000), 0x3f800000, STR(__LINE__)); // 0x1p-149 + 0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x00000001), 0x3f800000, STR(__LINE__)); // 0x1p+0 + 0x1p-149 = 0x1p+0
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xbf800000), 0xbf800000, STR(__LINE__)); // -0x1p-149 + -0x1p+0 = -0x1p+0
+}
+
+void f316(void) {
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x80000001), 0xbf800000, STR(__LINE__)); // -0x1p+0 + -0x1p-149 = -0x1p+0
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x3f7fffff), 0x3f7fffff, STR(__LINE__)); // 0x1p-149 + 0x1.fffffep-1 = 0x1.fffffep-1
+ comp32(single_of_bits(0x3f7fffff) + single_of_bits(0x00000001), 0x3f7fffff, STR(__LINE__)); // 0x1.fffffep-1 + 0x1p-149 = 0x1.fffffep-1
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xbf7fffff), 0xbf7fffff, STR(__LINE__)); // -0x1p-149 + -0x1.fffffep-1 = -0x1.fffffep-1
+ comp32(single_of_bits(0xbf7fffff) + single_of_bits(0x80000001), 0xbf7fffff, STR(__LINE__)); // -0x1.fffffep-1 + -0x1p-149 = -0x1.fffffep-1
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x3fffffff), 0x3fffffff, STR(__LINE__)); // 0x1p-149 + 0x1.fffffep+0 = 0x1.fffffep+0
+ comp32(single_of_bits(0x3fffffff) + single_of_bits(0x00000001), 0x3fffffff, STR(__LINE__)); // 0x1.fffffep+0 + 0x1p-149 = 0x1.fffffep+0
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xbfffffff), 0xbfffffff, STR(__LINE__)); // -0x1p-149 + -0x1.fffffep+0 = -0x1.fffffep+0
+ comp32(single_of_bits(0xbfffffff) + single_of_bits(0x80000001), 0xbfffffff, STR(__LINE__)); // -0x1.fffffep+0 + -0x1p-149 = -0x1.fffffep+0
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x3ffffffe), 0x3ffffffe, STR(__LINE__)); // 0x1p-149 + 0x1.fffffcp+0 = 0x1.fffffcp+0
+}
+
+void f317(void) {
+ comp32(single_of_bits(0x3ffffffe) + single_of_bits(0x00000001), 0x3ffffffe, STR(__LINE__)); // 0x1.fffffcp+0 + 0x1p-149 = 0x1.fffffcp+0
+ comp32(single_of_bits(0x80000001) + single_of_bits(0xbffffffe), 0xbffffffe, STR(__LINE__)); // -0x1p-149 + -0x1.fffffcp+0 = -0x1.fffffcp+0
+ comp32(single_of_bits(0xbffffffe) + single_of_bits(0x80000001), 0xbffffffe, STR(__LINE__)); // -0x1.fffffcp+0 + -0x1p-149 = -0x1.fffffcp+0
+ comp32(single_of_bits(0x4cffffff) + single_of_bits(0x41300000), 0x4d000000, STR(__LINE__)); // 0x1.fffffep+26 + 0x1.6p+3 = 0x1p+27
+ comp32(single_of_bits(0x41300000) + single_of_bits(0x4cffffff), 0x4d000000, STR(__LINE__)); // 0x1.6p+3 + 0x1.fffffep+26 = 0x1p+27
+ comp32(single_of_bits(0xccffffff) + single_of_bits(0xc1300000), 0xcd000000, STR(__LINE__)); // -0x1.fffffep+26 + -0x1.6p+3 = -0x1p+27
+ comp32(single_of_bits(0xc1300000) + single_of_bits(0xccffffff), 0xcd000000, STR(__LINE__)); // -0x1.6p+3 + -0x1.fffffep+26 = -0x1p+27
+ comp32(single_of_bits(0x4ce00000) + single_of_bits(0xc0e00000), 0x4cdfffff, STR(__LINE__)); // 0x1.cp+26 + -0x1.cp+2 = 0x1.bffffep+26
+ comp32(single_of_bits(0xc0e00000) + single_of_bits(0x4ce00000), 0x4cdfffff, STR(__LINE__)); // -0x1.cp+2 + 0x1.cp+26 = 0x1.bffffep+26
+ comp32(single_of_bits(0x40e00000) + single_of_bits(0xcce00000), 0xccdfffff, STR(__LINE__)); // 0x1.cp+2 + -0x1.cp+26 = -0x1.bffffep+26
+}
+
+void f318(void) {
+ comp32(single_of_bits(0xcce00000) + single_of_bits(0x40e00000), 0xccdfffff, STR(__LINE__)); // -0x1.cp+26 + 0x1.cp+2 = -0x1.bffffep+26
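+ // Subtracting 0x1.cp-147 from 0x1p-122 falls just below a power of two, so the result drops into the binade beneath: 0x1.fffffep-123.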
+ comp32(single_of_bits(0x02800000) + single_of_bits(0x80000007), 0x027fffff, STR(__LINE__)); // 0x1p-122 + -0x1.cp-147 = 0x1.fffffep-123
+ comp32(single_of_bits(0x80000007) + single_of_bits(0x02800000), 0x027fffff, STR(__LINE__)); // -0x1.cp-147 + 0x1p-122 = 0x1.fffffep-123
+ comp32(single_of_bits(0x82800000) + single_of_bits(0x00000007), 0x827fffff, STR(__LINE__)); // -0x1p-122 + 0x1.cp-147 = -0x1.fffffep-123
+ comp32(single_of_bits(0x00000007) + single_of_bits(0x82800000), 0x827fffff, STR(__LINE__)); // 0x1.cp-147 + -0x1p-122 = -0x1.fffffep-123
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x40800000), 0x40a00000, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+2 = 0x1.4p+2
+ comp32(single_of_bits(0x40800000) + single_of_bits(0x3f800001), 0x40a00000, STR(__LINE__)); // 0x1p+2 + 0x1.000002p+0 = 0x1.4p+2
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0xc0800000), 0xc0a00000, STR(__LINE__)); // -0x1.000002p+0 + -0x1p+2 = -0x1.4p+2
+ comp32(single_of_bits(0xc0800000) + single_of_bits(0xbf800001), 0xc0a00000, STR(__LINE__)); // -0x1p+2 + -0x1.000002p+0 = -0x1.4p+2
+ comp32(single_of_bits(0x4c000000) + single_of_bits(0x3f800000), 0x4c000000, STR(__LINE__)); // 0x1p+25 + 0x1p+0 = 0x1p+25
+}
+
+void f319(void) {
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x4c000000), 0x4c000000, STR(__LINE__)); // 0x1p+0 + 0x1p+25 = 0x1p+25
+ comp32(single_of_bits(0xcc000000) + single_of_bits(0xbf800000), 0xcc000000, STR(__LINE__)); // -0x1p+25 + -0x1p+0 = -0x1p+25
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xcc000000), 0xcc000000, STR(__LINE__)); // -0x1p+0 + -0x1p+25 = -0x1p+25
+ comp32(single_of_bits(0x41200000) + single_of_bits(0x4cffffff), 0x4d000000, STR(__LINE__)); // 0x1.4p+3 + 0x1.fffffep+26 = 0x1p+27
+ comp32(single_of_bits(0x4cffffff) + single_of_bits(0x41200000), 0x4d000000, STR(__LINE__)); // 0x1.fffffep+26 + 0x1.4p+3 = 0x1p+27
+ comp32(single_of_bits(0xc1200000) + single_of_bits(0xccffffff), 0xcd000000, STR(__LINE__)); // -0x1.4p+3 + -0x1.fffffep+26 = -0x1p+27
+ comp32(single_of_bits(0xccffffff) + single_of_bits(0xc1200000), 0xcd000000, STR(__LINE__)); // -0x1.fffffep+26 + -0x1.4p+3 = -0x1p+27
+ comp32(single_of_bits(0x40800000) + single_of_bits(0xb4400000), 0x407fffff, STR(__LINE__)); // 0x1p+2 + -0x1.8p-23 = 0x1.fffffep+1
+ comp32(single_of_bits(0xb4400000) + single_of_bits(0x40800000), 0x407fffff, STR(__LINE__)); // -0x1.8p-23 + 0x1p+2 = 0x1.fffffep+1
+ comp32(single_of_bits(0x40400000) + single_of_bits(0xcc800000), 0xcc7fffff, STR(__LINE__)); // 0x1.8p+1 + -0x1p+26 = -0x1.fffffep+25
+}
+
+void f320(void) {
+ comp32(single_of_bits(0xcc800000) + single_of_bits(0x40400000), 0xcc7fffff, STR(__LINE__)); // -0x1p+26 + 0x1.8p+1 = -0x1.fffffep+25
+ comp32(single_of_bits(0xc0800000) + single_of_bits(0x34400000), 0xc07fffff, STR(__LINE__)); // -0x1p+2 + 0x1.8p-23 = -0x1.fffffep+1
+ comp32(single_of_bits(0x34400000) + single_of_bits(0xc0800000), 0xc07fffff, STR(__LINE__)); // 0x1.8p-23 + -0x1p+2 = -0x1.fffffep+1
+ comp32(single_of_bits(0xc0400000) + single_of_bits(0x4c800000), 0x4c7fffff, STR(__LINE__)); // -0x1.8p+1 + 0x1p+26 = 0x1.fffffep+25
+ comp32(single_of_bits(0x4c800000) + single_of_bits(0xc0400000), 0x4c7fffff, STR(__LINE__)); // 0x1p+26 + -0x1.8p+1 = 0x1.fffffep+25
+ comp32(single_of_bits(0x02000000) + single_of_bits(0x80000003), 0x01ffffff, STR(__LINE__)); // 0x1p-123 + -0x1.8p-148 = 0x1.fffffep-124
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x02000000), 0x01ffffff, STR(__LINE__)); // -0x1.8p-148 + 0x1p-123 = 0x1.fffffep-124
+ comp32(single_of_bits(0x82000000) + single_of_bits(0x00000003), 0x81ffffff, STR(__LINE__)); // -0x1p-123 + 0x1.8p-148 = -0x1.fffffep-124
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x82000000), 0x81ffffff, STR(__LINE__)); // 0x1.8p-148 + -0x1p-123 = -0x1.fffffep-124
+ comp32(single_of_bits(0x3f800003) + single_of_bits(0x41000000), 0x41100000, STR(__LINE__)); // 0x1.000006p+0 + 0x1p+3 = 0x1.2p+3
+}
+
+void f321(void) {
+ comp32(single_of_bits(0x41000000) + single_of_bits(0x3f800003), 0x41100000, STR(__LINE__)); // 0x1p+3 + 0x1.000006p+0 = 0x1.2p+3
+ comp32(single_of_bits(0xbf800003) + single_of_bits(0xc1000000), 0xc1100000, STR(__LINE__)); // -0x1.000006p+0 + -0x1p+3 = -0x1.2p+3
+ comp32(single_of_bits(0xc1000000) + single_of_bits(0xbf800003), 0xc1100000, STR(__LINE__)); // -0x1p+3 + -0x1.000006p+0 = -0x1.2p+3
+ comp32(single_of_bits(0x407fffff) + single_of_bits(0x33ffffff), 0x407fffff, STR(__LINE__)); // 0x1.fffffep+1 + 0x1.fffffep-24 = 0x1.fffffep+1
+ comp32(single_of_bits(0x33ffffff) + single_of_bits(0x407fffff), 0x407fffff, STR(__LINE__)); // 0x1.fffffep-24 + 0x1.fffffep+1 = 0x1.fffffep+1
+ comp32(single_of_bits(0xc07fffff) + single_of_bits(0xb3ffffff), 0xc07fffff, STR(__LINE__)); // -0x1.fffffep+1 + -0x1.fffffep-24 = -0x1.fffffep+1
+ comp32(single_of_bits(0xb3ffffff) + single_of_bits(0xc07fffff), 0xc07fffff, STR(__LINE__)); // -0x1.fffffep-24 + -0x1.fffffep+1 = -0x1.fffffep+1
+ comp32(single_of_bits(0x02000000) + single_of_bits(0x00000003), 0x02000000, STR(__LINE__)); // 0x1p-123 + 0x1.8p-148 = 0x1p-123
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x02000000), 0x02000000, STR(__LINE__)); // 0x1.8p-148 + 0x1p-123 = 0x1p-123
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x82000000), 0x82000000, STR(__LINE__)); // -0x1.8p-148 + -0x1p-123 = -0x1p-123
+}
+
+void f322(void) {
+ comp32(single_of_bits(0x82000000) + single_of_bits(0x80000003), 0x82000000, STR(__LINE__)); // -0x1p-123 + -0x1.8p-148 = -0x1p-123
+ comp32(single_of_bits(0x41600000) + single_of_bits(0x4cffffff), 0x4d000000, STR(__LINE__)); // 0x1.cp+3 + 0x1.fffffep+26 = 0x1p+27
+ comp32(single_of_bits(0x4cffffff) + single_of_bits(0x41600000), 0x4d000000, STR(__LINE__)); // 0x1.fffffep+26 + 0x1.cp+3 = 0x1p+27
+ comp32(single_of_bits(0xc1600000) + single_of_bits(0xccffffff), 0xcd000000, STR(__LINE__)); // -0x1.cp+3 + -0x1.fffffep+26 = -0x1p+27
+ comp32(single_of_bits(0xccffffff) + single_of_bits(0xc1600000), 0xcd000000, STR(__LINE__)); // -0x1.fffffep+26 + -0x1.cp+3 = -0x1p+27
+ comp32(single_of_bits(0x41000000) + single_of_bits(0xb4a00000), 0x40ffffff, STR(__LINE__)); // 0x1p+3 + -0x1.4p-22 = 0x1.fffffep+2
+ comp32(single_of_bits(0xb4a00000) + single_of_bits(0x41000000), 0x40ffffff, STR(__LINE__)); // -0x1.4p-22 + 0x1p+3 = 0x1.fffffep+2
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0xcd000000), 0xccffffff, STR(__LINE__)); // 0x1.4p+2 + -0x1p+27 = -0x1.fffffep+26
+ comp32(single_of_bits(0xcd000000) + single_of_bits(0x40a00000), 0xccffffff, STR(__LINE__)); // -0x1p+27 + 0x1.4p+2 = -0x1.fffffep+26
+ comp32(single_of_bits(0xc1000000) + single_of_bits(0x34a00000), 0xc0ffffff, STR(__LINE__)); // -0x1p+3 + 0x1.4p-22 = -0x1.fffffep+2
+}
+
+void f323(void) {
+ comp32(single_of_bits(0x34a00000) + single_of_bits(0xc1000000), 0xc0ffffff, STR(__LINE__)); // 0x1.4p-22 + -0x1p+3 = -0x1.fffffep+2
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0x4d000000), 0x4cffffff, STR(__LINE__)); // -0x1.4p+2 + 0x1p+27 = 0x1.fffffep+26
+ comp32(single_of_bits(0x4d000000) + single_of_bits(0xc0a00000), 0x4cffffff, STR(__LINE__)); // 0x1p+27 + -0x1.4p+2 = 0x1.fffffep+26
+ comp32(single_of_bits(0x02800000) + single_of_bits(0x80000005), 0x027fffff, STR(__LINE__)); // 0x1p-122 + -0x1.4p-147 = 0x1.fffffep-123
+ comp32(single_of_bits(0x80000005) + single_of_bits(0x02800000), 0x027fffff, STR(__LINE__)); // -0x1.4p-147 + 0x1p-122 = 0x1.fffffep-123
+ comp32(single_of_bits(0x82800000) + single_of_bits(0x00000005), 0x827fffff, STR(__LINE__)); // -0x1p-122 + 0x1.4p-147 = -0x1.fffffep-123
+ comp32(single_of_bits(0x00000005) + single_of_bits(0x82800000), 0x827fffff, STR(__LINE__)); // 0x1.4p-147 + -0x1p-122 = -0x1.fffffep-123
+ comp32(single_of_bits(0x34000000) + single_of_bits(0x40000000), 0x40000000, STR(__LINE__)); // 0x1p-23 + 0x1p+1 = 0x1p+1
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x34000000), 0x40000000, STR(__LINE__)); // 0x1p+1 + 0x1p-23 = 0x1p+1
+ comp32(single_of_bits(0xb4000000) + single_of_bits(0xc0000000), 0xc0000000, STR(__LINE__)); // -0x1p-23 + -0x1p+1 = -0x1p+1
+}
+
+void f324(void) {
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xb4000000), 0xc0000000, STR(__LINE__)); // -0x1p+1 + -0x1p-23 = -0x1p+1
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x01000000), 0x01000000, STR(__LINE__)); // 0x1p-149 + 0x1p-125 = 0x1p-125
+ comp32(single_of_bits(0x01000000) + single_of_bits(0x00000001), 0x01000000, STR(__LINE__)); // 0x1p-125 + 0x1p-149 = 0x1p-125
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x81000000), 0x81000000, STR(__LINE__)); // -0x1p-149 + -0x1p-125 = -0x1p-125
+ comp32(single_of_bits(0x81000000) + single_of_bits(0x80000001), 0x81000000, STR(__LINE__)); // -0x1p-125 + -0x1p-149 = -0x1p-125
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x3f800000), 0x40000000, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+0 = 0x1p+1
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x3f800001), 0x40000000, STR(__LINE__)); // 0x1p+0 + 0x1.000002p+0 = 0x1p+1
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0xbf800000), 0xc0000000, STR(__LINE__)); // -0x1.000002p+0 + -0x1p+0 = -0x1p+1
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xbf800001), 0xc0000000, STR(__LINE__)); // -0x1p+0 + -0x1.000002p+0 = -0x1p+1
+ comp32(single_of_bits(0x7e800001) + single_of_bits(0x7e800000), 0x7f000000, STR(__LINE__)); // 0x1.000002p+126 + 0x1p+126 = 0x1p+127
+}
+
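+/* The sums in the function below all fall exactly halfway between two
+   representable single-precision values; they are expected to round to the
+   neighbour whose low-order mantissa bit is even (ties-to-even). */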
+void f325(void) {
+ comp32(single_of_bits(0x7e800000) + single_of_bits(0x7e800001), 0x7f000000, STR(__LINE__)); // 0x1p+126 + 0x1.000002p+126 = 0x1p+127
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x40000001), 0x40800000, STR(__LINE__)); // 0x1p+1 + 0x1.000002p+1 = 0x1p+2
+ comp32(single_of_bits(0x40000001) + single_of_bits(0x40000000), 0x40800000, STR(__LINE__)); // 0x1.000002p+1 + 0x1p+1 = 0x1p+2
+ comp32(single_of_bits(0x7efffffe) + single_of_bits(0x7effffff), 0x7f7ffffe, STR(__LINE__)); // 0x1.fffffcp+126 + 0x1.fffffep+126 = 0x1.fffffcp+127
+ comp32(single_of_bits(0x7effffff) + single_of_bits(0x7efffffe), 0x7f7ffffe, STR(__LINE__)); // 0x1.fffffep+126 + 0x1.fffffcp+126 = 0x1.fffffcp+127
+ comp32(single_of_bits(0xfe800001) + single_of_bits(0xfe800000), 0xff000000, STR(__LINE__)); // -0x1.000002p+126 + -0x1p+126 = -0x1p+127
+ comp32(single_of_bits(0xfe800000) + single_of_bits(0xfe800001), 0xff000000, STR(__LINE__)); // -0x1p+126 + -0x1.000002p+126 = -0x1p+127
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xc0000001), 0xc0800000, STR(__LINE__)); // -0x1p+1 + -0x1.000002p+1 = -0x1p+2
+ comp32(single_of_bits(0xc0000001) + single_of_bits(0xc0000000), 0xc0800000, STR(__LINE__)); // -0x1.000002p+1 + -0x1p+1 = -0x1p+2
+ comp32(single_of_bits(0xfefffffe) + single_of_bits(0xfeffffff), 0xff7ffffe, STR(__LINE__)); // -0x1.fffffcp+126 + -0x1.fffffep+126 = -0x1.fffffcp+127
+}
+
+void f326(void) {
+ comp32(single_of_bits(0xfeffffff) + single_of_bits(0xfefffffe), 0xff7ffffe, STR(__LINE__)); // -0x1.fffffep+126 + -0x1.fffffcp+126 = -0x1.fffffcp+127
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0xb3800000), 0x3f800000, STR(__LINE__)); // 0x1.000002p+0 + -0x1p-24 = 0x1p+0
+ comp32(single_of_bits(0xb3800000) + single_of_bits(0x3f800001), 0x3f800000, STR(__LINE__)); // -0x1p-24 + 0x1.000002p+0 = 0x1p+0
+ comp32(single_of_bits(0x33800000) + single_of_bits(0xbf800001), 0xbf800000, STR(__LINE__)); // 0x1p-24 + -0x1.000002p+0 = -0x1p+0
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0x33800000), 0xbf800000, STR(__LINE__)); // -0x1.000002p+0 + 0x1p-24 = -0x1p+0
+ comp32(single_of_bits(0x01000001) + single_of_bits(0x80000001), 0x01000000, STR(__LINE__)); // 0x1.000002p-125 + -0x1p-149 = 0x1p-125
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x01000001), 0x01000000, STR(__LINE__)); // -0x1p-149 + 0x1.000002p-125 = 0x1p-125
+ comp32(single_of_bits(0x81000001) + single_of_bits(0x00000001), 0x81000000, STR(__LINE__)); // -0x1.000002p-125 + 0x1p-149 = -0x1p-125
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x81000001), 0x81000000, STR(__LINE__)); // 0x1p-149 + -0x1.000002p-125 = -0x1p-125
+ comp32(single_of_bits(0x34000000) + single_of_bits(0x407fffff), 0x40800000, STR(__LINE__)); // 0x1p-23 + 0x1.fffffep+1 = 0x1p+2
+}
+
+void f327(void) {
+ comp32(single_of_bits(0x407fffff) + single_of_bits(0x34000000), 0x40800000, STR(__LINE__)); // 0x1.fffffep+1 + 0x1p-23 = 0x1p+2
+ comp32(single_of_bits(0xb4000000) + single_of_bits(0xc07fffff), 0xc0800000, STR(__LINE__)); // -0x1p-23 + -0x1.fffffep+1 = -0x1p+2
+ comp32(single_of_bits(0xc07fffff) + single_of_bits(0xb4000000), 0xc0800000, STR(__LINE__)); // -0x1.fffffep+1 + -0x1p-23 = -0x1p+2
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x01000001), 0x01000002, STR(__LINE__)); // 0x1p-149 + 0x1.000002p-125 = 0x1.000004p-125
+ comp32(single_of_bits(0x01000001) + single_of_bits(0x00000001), 0x01000002, STR(__LINE__)); // 0x1.000002p-125 + 0x1p-149 = 0x1.000004p-125
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x81000001), 0x81000002, STR(__LINE__)); // -0x1p-149 + -0x1.000002p-125 = -0x1.000004p-125
+ comp32(single_of_bits(0x81000001) + single_of_bits(0x80000001), 0x81000002, STR(__LINE__)); // -0x1.000002p-125 + -0x1p-149 = -0x1.000004p-125
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x3f800003), 0x40000002, STR(__LINE__)); // 0x1p+0 + 0x1.000006p+0 = 0x1.000004p+1
+ comp32(single_of_bits(0x3f800003) + single_of_bits(0x3f800000), 0x40000002, STR(__LINE__)); // 0x1.000006p+0 + 0x1p+0 = 0x1.000004p+1
+ comp32(single_of_bits(0x40000001) + single_of_bits(0x40000002), 0x40800002, STR(__LINE__)); // 0x1.000002p+1 + 0x1.000004p+1 = 0x1.000004p+2
+}
+
+void f328(void) {
+ comp32(single_of_bits(0x40000002) + single_of_bits(0x40000001), 0x40800002, STR(__LINE__)); // 0x1.000004p+1 + 0x1.000002p+1 = 0x1.000004p+2
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xbf800003), 0xc0000002, STR(__LINE__)); // -0x1p+0 + -0x1.000006p+0 = -0x1.000004p+1
+ comp32(single_of_bits(0xbf800003) + single_of_bits(0xbf800000), 0xc0000002, STR(__LINE__)); // -0x1.000006p+0 + -0x1p+0 = -0x1.000004p+1
+ comp32(single_of_bits(0xc0000001) + single_of_bits(0xc0000002), 0xc0800002, STR(__LINE__)); // -0x1.000002p+1 + -0x1.000004p+1 = -0x1.000004p+2
+ comp32(single_of_bits(0xc0000002) + single_of_bits(0xc0000001), 0xc0800002, STR(__LINE__)); // -0x1.000004p+1 + -0x1.000002p+1 = -0x1.000004p+2
+ comp32(single_of_bits(0x34000000) + single_of_bits(0x40000001), 0x40000002, STR(__LINE__)); // 0x1p-23 + 0x1.000002p+1 = 0x1.000004p+1
+ comp32(single_of_bits(0x40000001) + single_of_bits(0x34000000), 0x40000002, STR(__LINE__)); // 0x1.000002p+1 + 0x1p-23 = 0x1.000004p+1
+ comp32(single_of_bits(0xb4000000) + single_of_bits(0xc0000001), 0xc0000002, STR(__LINE__)); // -0x1p-23 + -0x1.000002p+1 = -0x1.000004p+1
+ comp32(single_of_bits(0xc0000001) + single_of_bits(0xb4000000), 0xc0000002, STR(__LINE__)); // -0x1.000002p+1 + -0x1p-23 = -0x1.000004p+1
+ comp32(single_of_bits(0x40000002) + single_of_bits(0xb4000000), 0x40000002, STR(__LINE__)); // 0x1.000004p+1 + -0x1p-23 = 0x1.000004p+1
+}
+
+void f329(void) {
+ comp32(single_of_bits(0xb4000000) + single_of_bits(0x40000002), 0x40000002, STR(__LINE__)); // -0x1p-23 + 0x1.000004p+1 = 0x1.000004p+1
+ comp32(single_of_bits(0x34000000) + single_of_bits(0xc0000002), 0xc0000002, STR(__LINE__)); // 0x1p-23 + -0x1.000004p+1 = -0x1.000004p+1
+ comp32(single_of_bits(0xc0000002) + single_of_bits(0x34000000), 0xc0000002, STR(__LINE__)); // -0x1.000004p+1 + 0x1p-23 = -0x1.000004p+1
+ comp32(single_of_bits(0x01800002) + single_of_bits(0x80000001), 0x01800002, STR(__LINE__)); // 0x1.000004p-124 + -0x1p-149 = 0x1.000004p-124
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x01800002), 0x01800002, STR(__LINE__)); // -0x1p-149 + 0x1.000004p-124 = 0x1.000004p-124
+ comp32(single_of_bits(0x81800002) + single_of_bits(0x00000001), 0x81800002, STR(__LINE__)); // -0x1.000004p-124 + 0x1p-149 = -0x1.000004p-124
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x81800002), 0x81800002, STR(__LINE__)); // 0x1p-149 + -0x1.000004p-124 = -0x1.000004p-124
+ comp32(single_of_bits(0x3f800003) + single_of_bits(0x40800000), 0x40a00001, STR(__LINE__)); // 0x1.000006p+0 + 0x1p+2 = 0x1.400002p+2
+ comp32(single_of_bits(0x40800000) + single_of_bits(0x3f800003), 0x40a00001, STR(__LINE__)); // 0x1p+2 + 0x1.000006p+0 = 0x1.400002p+2
+ comp32(single_of_bits(0xbf800003) + single_of_bits(0xc0800000), 0xc0a00001, STR(__LINE__)); // -0x1.000006p+0 + -0x1p+2 = -0x1.400002p+2
+}
+
+void f330(void) {
+ comp32(single_of_bits(0xc0800000) + single_of_bits(0xbf800003), 0xc0a00001, STR(__LINE__)); // -0x1p+2 + -0x1.000006p+0 = -0x1.400002p+2
+ comp32(single_of_bits(0x40400000) + single_of_bits(0x4c400000), 0x4c400001, STR(__LINE__)); // 0x1.8p+1 + 0x1.8p+25 = 0x1.800002p+25
+ comp32(single_of_bits(0x4c400000) + single_of_bits(0x40400000), 0x4c400001, STR(__LINE__)); // 0x1.8p+25 + 0x1.8p+1 = 0x1.800002p+25
+ comp32(single_of_bits(0xc0400000) + single_of_bits(0xcc400000), 0xcc400001, STR(__LINE__)); // -0x1.8p+1 + -0x1.8p+25 = -0x1.800002p+25
+ comp32(single_of_bits(0xcc400000) + single_of_bits(0xc0400000), 0xcc400001, STR(__LINE__)); // -0x1.8p+25 + -0x1.8p+1 = -0x1.800002p+25
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x01800000), 0x01800001, STR(__LINE__)); // 0x1.8p-148 + 0x1p-124 = 0x1.000002p-124
+ comp32(single_of_bits(0x01800000) + single_of_bits(0x00000003), 0x01800001, STR(__LINE__)); // 0x1p-124 + 0x1.8p-148 = 0x1.000002p-124
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x81800000), 0x81800001, STR(__LINE__)); // -0x1.8p-148 + -0x1p-124 = -0x1.000002p-124
+ comp32(single_of_bits(0x81800000) + single_of_bits(0x80000003), 0x81800001, STR(__LINE__)); // -0x1p-124 + -0x1.8p-148 = -0x1.000002p-124
+ comp32(single_of_bits(0x3fffffff) + single_of_bits(0x34a00000), 0x40000001, STR(__LINE__)); // 0x1.fffffep+0 + 0x1.4p-22 = 0x1.000002p+1
+}
+
+void f331(void) {
+ comp32(single_of_bits(0x34a00000) + single_of_bits(0x3fffffff), 0x40000001, STR(__LINE__)); // 0x1.4p-22 + 0x1.fffffep+0 = 0x1.000002p+1
+ comp32(single_of_bits(0xbfffffff) + single_of_bits(0xb4a00000), 0xc0000001, STR(__LINE__)); // -0x1.fffffep+0 + -0x1.4p-22 = -0x1.000002p+1
+ comp32(single_of_bits(0xb4a00000) + single_of_bits(0xbfffffff), 0xc0000001, STR(__LINE__)); // -0x1.4p-22 + -0x1.fffffep+0 = -0x1.000002p+1
+ comp32(single_of_bits(0x4c800000) + single_of_bits(0xbf800000), 0x4c800000, STR(__LINE__)); // 0x1p+26 + -0x1p+0 = 0x1p+26
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x4c800000), 0x4c800000, STR(__LINE__)); // -0x1p+0 + 0x1p+26 = 0x1p+26
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xcc800000), 0xcc800000, STR(__LINE__)); // 0x1p+0 + -0x1p+26 = -0x1p+26
+ comp32(single_of_bits(0xcc800000) + single_of_bits(0x3f800000), 0xcc800000, STR(__LINE__)); // -0x1p+26 + 0x1p+0 = -0x1p+26
+ comp32(single_of_bits(0x02000000) + single_of_bits(0x80000001), 0x02000000, STR(__LINE__)); // 0x1p-123 + -0x1p-149 = 0x1p-123
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x02000000), 0x02000000, STR(__LINE__)); // -0x1p-149 + 0x1p-123 = 0x1p-123
+ comp32(single_of_bits(0x82000000) + single_of_bits(0x00000001), 0x82000000, STR(__LINE__)); // -0x1p-123 + 0x1p-149 = -0x1p-123
+}
+
+void f332(void) {
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x82000000), 0x82000000, STR(__LINE__)); // 0x1p-149 + -0x1p-123 = -0x1p-123
+ comp32(single_of_bits(0x3f800005) + single_of_bits(0x41000000), 0x41100001, STR(__LINE__)); // 0x1.00000ap+0 + 0x1p+3 = 0x1.200002p+3
+ comp32(single_of_bits(0x41000000) + single_of_bits(0x3f800005), 0x41100001, STR(__LINE__)); // 0x1p+3 + 0x1.00000ap+0 = 0x1.200002p+3
+ comp32(single_of_bits(0xbf800005) + single_of_bits(0xc1000000), 0xc1100001, STR(__LINE__)); // -0x1.00000ap+0 + -0x1p+3 = -0x1.200002p+3
+ comp32(single_of_bits(0xc1000000) + single_of_bits(0xbf800005), 0xc1100001, STR(__LINE__)); // -0x1p+3 + -0x1.00000ap+0 = -0x1.200002p+3
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0x4c800000), 0x4c800001, STR(__LINE__)); // 0x1.4p+2 + 0x1p+26 = 0x1.000002p+26
+ comp32(single_of_bits(0x4c800000) + single_of_bits(0x40a00000), 0x4c800001, STR(__LINE__)); // 0x1p+26 + 0x1.4p+2 = 0x1.000002p+26
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0xcc800000), 0xcc800001, STR(__LINE__)); // -0x1.4p+2 + -0x1p+26 = -0x1.000002p+26
+ comp32(single_of_bits(0xcc800000) + single_of_bits(0xc0a00000), 0xcc800001, STR(__LINE__)); // -0x1p+26 + -0x1.4p+2 = -0x1.000002p+26
+ comp32(single_of_bits(0x00000005) + single_of_bits(0x02000000), 0x02000001, STR(__LINE__)); // 0x1.4p-147 + 0x1p-123 = 0x1.000002p-123
+}
+
+void f333(void) {
+ comp32(single_of_bits(0x02000000) + single_of_bits(0x00000005), 0x02000001, STR(__LINE__)); // 0x1p-123 + 0x1.4p-147 = 0x1.000002p-123
+ comp32(single_of_bits(0x80000005) + single_of_bits(0x82000000), 0x82000001, STR(__LINE__)); // -0x1.4p-147 + -0x1p-123 = -0x1.000002p-123
+ comp32(single_of_bits(0x82000000) + single_of_bits(0x80000005), 0x82000001, STR(__LINE__)); // -0x1p-123 + -0x1.4p-147 = -0x1.000002p-123
+ comp32(single_of_bits(0x3fffffff) + single_of_bits(0x34800001), 0x40000001, STR(__LINE__)); // 0x1.fffffep+0 + 0x1.000002p-22 = 0x1.000002p+1
+ comp32(single_of_bits(0x34800001) + single_of_bits(0x3fffffff), 0x40000001, STR(__LINE__)); // 0x1.000002p-22 + 0x1.fffffep+0 = 0x1.000002p+1
+ comp32(single_of_bits(0xbfffffff) + single_of_bits(0xb4800001), 0xc0000001, STR(__LINE__)); // -0x1.fffffep+0 + -0x1.000002p-22 = -0x1.000002p+1
+ comp32(single_of_bits(0xb4800001) + single_of_bits(0xbfffffff), 0xc0000001, STR(__LINE__)); // -0x1.000002p-22 + -0x1.fffffep+0 = -0x1.000002p+1
+ comp32(single_of_bits(0x4d400000) + single_of_bits(0xc0400000), 0x4d400000, STR(__LINE__)); // 0x1.8p+27 + -0x1.8p+1 = 0x1.8p+27
+ comp32(single_of_bits(0xc0400000) + single_of_bits(0x4d400000), 0x4d400000, STR(__LINE__)); // -0x1.8p+1 + 0x1.8p+27 = 0x1.8p+27
+ comp32(single_of_bits(0x40400000) + single_of_bits(0xcd400000), 0xcd400000, STR(__LINE__)); // 0x1.8p+1 + -0x1.8p+27 = -0x1.8p+27
+}
+
+void f334(void) {
+ comp32(single_of_bits(0xcd400000) + single_of_bits(0x40400000), 0xcd400000, STR(__LINE__)); // -0x1.8p+27 + 0x1.8p+1 = -0x1.8p+27
+ comp32(single_of_bits(0x02800000) + single_of_bits(0x80000003), 0x02800000, STR(__LINE__)); // 0x1p-122 + -0x1.8p-148 = 0x1p-122
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x02800000), 0x02800000, STR(__LINE__)); // -0x1.8p-148 + 0x1p-122 = 0x1p-122
+ comp32(single_of_bits(0x82800000) + single_of_bits(0x00000003), 0x82800000, STR(__LINE__)); // -0x1p-122 + 0x1.8p-148 = -0x1p-122
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x82800000), 0x82800000, STR(__LINE__)); // 0x1.8p-148 + -0x1p-122 = -0x1p-122
+ comp32(single_of_bits(0x3f800007) + single_of_bits(0x41000000), 0x41100001, STR(__LINE__)); // 0x1.00000ep+0 + 0x1p+3 = 0x1.200002p+3
+ comp32(single_of_bits(0x41000000) + single_of_bits(0x3f800007), 0x41100001, STR(__LINE__)); // 0x1p+3 + 0x1.00000ep+0 = 0x1.200002p+3
+ comp32(single_of_bits(0xbf800007) + single_of_bits(0xc1000000), 0xc1100001, STR(__LINE__)); // -0x1.00000ep+0 + -0x1p+3 = -0x1.200002p+3
+ comp32(single_of_bits(0xc1000000) + single_of_bits(0xbf800007), 0xc1100001, STR(__LINE__)); // -0x1p+3 + -0x1.00000ep+0 = -0x1.200002p+3
+ comp32(single_of_bits(0x40e00000) + single_of_bits(0x4ce00000), 0x4ce00001, STR(__LINE__)); // 0x1.cp+2 + 0x1.cp+26 = 0x1.c00002p+26
+}
+
+void f335(void) {
+ comp32(single_of_bits(0x4ce00000) + single_of_bits(0x40e00000), 0x4ce00001, STR(__LINE__)); // 0x1.cp+26 + 0x1.cp+2 = 0x1.c00002p+26
+ comp32(single_of_bits(0xc0e00000) + single_of_bits(0xcce00000), 0xcce00001, STR(__LINE__)); // -0x1.cp+2 + -0x1.cp+26 = -0x1.c00002p+26
+ comp32(single_of_bits(0xcce00000) + single_of_bits(0xc0e00000), 0xcce00001, STR(__LINE__)); // -0x1.cp+26 + -0x1.cp+2 = -0x1.c00002p+26
+ comp32(single_of_bits(0x00000007) + single_of_bits(0x02000000), 0x02000001, STR(__LINE__)); // 0x1.cp-147 + 0x1p-123 = 0x1.000002p-123
+ comp32(single_of_bits(0x02000000) + single_of_bits(0x00000007), 0x02000001, STR(__LINE__)); // 0x1p-123 + 0x1.cp-147 = 0x1.000002p-123
+ comp32(single_of_bits(0x80000007) + single_of_bits(0x82000000), 0x82000001, STR(__LINE__)); // -0x1.cp-147 + -0x1p-123 = -0x1.000002p-123
+ comp32(single_of_bits(0x82000000) + single_of_bits(0x80000007), 0x82000001, STR(__LINE__)); // -0x1p-123 + -0x1.cp-147 = -0x1.000002p-123
+ comp32(single_of_bits(0x3fffffff) + single_of_bits(0x34a00001), 0x40000001, STR(__LINE__)); // 0x1.fffffep+0 + 0x1.400002p-22 = 0x1.000002p+1
+ comp32(single_of_bits(0x34a00001) + single_of_bits(0x3fffffff), 0x40000001, STR(__LINE__)); // 0x1.400002p-22 + 0x1.fffffep+0 = 0x1.000002p+1
+ comp32(single_of_bits(0xbfffffff) + single_of_bits(0xb4a00001), 0xc0000001, STR(__LINE__)); // -0x1.fffffep+0 + -0x1.400002p-22 = -0x1.000002p+1
+}
+
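+/* In most of the cases below, the smaller operand is far too small to affect
+   the rounded result, so the larger operand is expected to come back
+   unchanged. */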
+void f336(void) {
+ comp32(single_of_bits(0xb4a00001) + single_of_bits(0xbfffffff), 0xc0000001, STR(__LINE__)); // -0x1.400002p-22 + -0x1.fffffep+0 = -0x1.000002p+1
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0xbf800000), 0x7f000000, STR(__LINE__)); // 0x1p+127 + -0x1p+0 = 0x1p+127
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x7f000000), 0x7f000000, STR(__LINE__)); // -0x1p+0 + 0x1p+127 = 0x1p+127
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x3f800000), 0xff000000, STR(__LINE__)); // -0x1p+127 + 0x1p+0 = -0x1p+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xff000000), 0xff000000, STR(__LINE__)); // 0x1p+0 + -0x1p+127 = -0x1p+127
+ comp32(single_of_bits(0x7effffff) + single_of_bits(0xbf800000), 0x7effffff, STR(__LINE__)); // 0x1.fffffep+126 + -0x1p+0 = 0x1.fffffep+126
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x7effffff), 0x7effffff, STR(__LINE__)); // -0x1p+0 + 0x1.fffffep+126 = 0x1.fffffep+126
+ comp32(single_of_bits(0xfeffffff) + single_of_bits(0x3f800000), 0xfeffffff, STR(__LINE__)); // -0x1.fffffep+126 + 0x1p+0 = -0x1.fffffep+126
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xfeffffff), 0xfeffffff, STR(__LINE__)); // 0x1p+0 + -0x1.fffffep+126 = -0x1.fffffep+126
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0xbf800000), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+127 + -0x1p+0 = 0x1.fffffep+127
+}
+
+void f337(void) {
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x7f7fffff), 0x7f7fffff, STR(__LINE__)); // -0x1p+0 + 0x1.fffffep+127 = 0x1.fffffep+127
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0x3f800000), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+127 + 0x1p+0 = -0x1.fffffep+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xff7fffff), 0xff7fffff, STR(__LINE__)); // 0x1p+0 + -0x1.fffffep+127 = -0x1.fffffep+127
+ comp32(single_of_bits(0x7f7ffffe) + single_of_bits(0xbf800000), 0x7f7ffffe, STR(__LINE__)); // 0x1.fffffcp+127 + -0x1p+0 = 0x1.fffffcp+127
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x7f7ffffe), 0x7f7ffffe, STR(__LINE__)); // -0x1p+0 + 0x1.fffffcp+127 = 0x1.fffffcp+127
+ comp32(single_of_bits(0xff7ffffe) + single_of_bits(0x3f800000), 0xff7ffffe, STR(__LINE__)); // -0x1.fffffcp+127 + 0x1p+0 = -0x1.fffffcp+127
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xff7ffffe), 0xff7ffffe, STR(__LINE__)); // 0x1p+0 + -0x1.fffffcp+127 = -0x1.fffffcp+127
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0x80000001), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+127 + -0x1p-149 = 0x1.fffffep+127
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x7f7fffff), 0x7f7fffff, STR(__LINE__)); // -0x1p-149 + 0x1.fffffep+127 = 0x1.fffffep+127
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0x00000001), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+127 + 0x1p-149 = -0x1.fffffep+127
+}
+
+void f338(void) {
+ comp32(single_of_bits(0x00000001) + single_of_bits(0xff7fffff), 0xff7fffff, STR(__LINE__)); // 0x1p-149 + -0x1.fffffep+127 = -0x1.fffffep+127
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x7f000000), 0x7f000000, STR(__LINE__)); // -0x1.8p-148 + 0x1p+127 = 0x1p+127
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0x80000003), 0x7f000000, STR(__LINE__)); // 0x1p+127 + -0x1.8p-148 = 0x1p+127
+ comp32(single_of_bits(0x00000003) + single_of_bits(0xff000000), 0xff000000, STR(__LINE__)); // 0x1.8p-148 + -0x1p+127 = -0x1p+127
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x00000003), 0xff000000, STR(__LINE__)); // -0x1p+127 + 0x1.8p-148 = -0x1p+127
+ comp32(single_of_bits(0x3f7fffff) + single_of_bits(0x80000001), 0x3f7fffff, STR(__LINE__)); // 0x1.fffffep-1 + -0x1p-149 = 0x1.fffffep-1
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x3f7fffff), 0x3f7fffff, STR(__LINE__)); // -0x1p-149 + 0x1.fffffep-1 = 0x1.fffffep-1
+ comp32(single_of_bits(0xbfffffff) + single_of_bits(0x00000001), 0xbfffffff, STR(__LINE__)); // -0x1.fffffep+0 + 0x1p-149 = -0x1.fffffep+0
+ comp32(single_of_bits(0x00000001) + single_of_bits(0xbfffffff), 0xbfffffff, STR(__LINE__)); // 0x1p-149 + -0x1.fffffep+0 = -0x1.fffffep+0
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x40400000), 0x40400000, STR(__LINE__)); // -0x1.8p-148 + 0x1.8p+1 = 0x1.8p+1
+}
+
+void f339(void) {
+ comp32(single_of_bits(0x40400000) + single_of_bits(0x80000003), 0x40400000, STR(__LINE__)); // 0x1.8p+1 + -0x1.8p-148 = 0x1.8p+1
+ comp32(single_of_bits(0x00000003) + single_of_bits(0xc0a00000), 0xc0a00000, STR(__LINE__)); // 0x1.8p-148 + -0x1.4p+2 = -0x1.4p+2
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0x00000003), 0xc0a00000, STR(__LINE__)); // -0x1.4p+2 + 0x1.8p-148 = -0x1.4p+2
+ comp32(single_of_bits(0x40000000) + single_of_bits(0xc0000000), 0x00000000, STR(__LINE__)); // 0x1p+1 + -0x1p+1 = 0x0p+0
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0x40000000), 0x00000000, STR(__LINE__)); // -0x1p+1 + 0x1p+1 = 0x0p+0
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0xc0a00000), 0x00000000, STR(__LINE__)); // 0x1.4p+2 + -0x1.4p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0x40a00000), 0x00000000, STR(__LINE__)); // -0x1.4p+2 + 0x1.4p+2 = 0x0p+0
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0xff000000), 0x00000000, STR(__LINE__)); // 0x1p+127 + -0x1p+127 = 0x0p+0
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x7f000000), 0x00000000, STR(__LINE__)); // -0x1p+127 + 0x1p+127 = 0x0p+0
+ comp32(single_of_bits(0xfefffffe) + single_of_bits(0x7efffffe), 0x00000000, STR(__LINE__)); // -0x1.fffffcp+126 + 0x1.fffffcp+126 = 0x0p+0
+}
+
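+/* The cases below that expect 0x00000000 add operands of equal magnitude and
+   opposite sign; exact cancellation yields +0.0 under round-to-nearest. */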
+void f340(void) {
+ comp32(single_of_bits(0x7efffffe) + single_of_bits(0xfefffffe), 0x00000000, STR(__LINE__)); // 0x1.fffffcp+126 + -0x1.fffffcp+126 = 0x0p+0
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xbf800000), 0x00000000, STR(__LINE__)); // 0x1p+0 + -0x1p+0 = 0x0p+0
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x3f800000), 0x00000000, STR(__LINE__)); // -0x1p+0 + 0x1p+0 = 0x0p+0
+ comp32(single_of_bits(0xc0400000) + single_of_bits(0x40400000), 0x00000000, STR(__LINE__)); // -0x1.8p+1 + 0x1.8p+1 = 0x0p+0
+ comp32(single_of_bits(0x40400000) + single_of_bits(0xc0400000), 0x00000000, STR(__LINE__)); // 0x1.8p+1 + -0x1.8p+1 = 0x0p+0
+ comp32(single_of_bits(0x00800000) + single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // 0x1p-126 + -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) + single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // -0x1p-126 + 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x007ffffc) + single_of_bits(0x807ffffc), 0x00000000, STR(__LINE__)); // 0x1.fffffp-127 + -0x1.fffffp-127 = 0x0p+0
+ comp32(single_of_bits(0x807ffffc) + single_of_bits(0x007ffffc), 0x00000000, STR(__LINE__)); // -0x1.fffffp-127 + 0x1.fffffp-127 = 0x0p+0
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x007fffff), 0x00000000, STR(__LINE__)); // -0x1.fffffcp-127 + 0x1.fffffcp-127 = 0x0p+0
+}
+
+void f341(void) {
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x807fffff), 0x00000000, STR(__LINE__)); // 0x1.fffffcp-127 + -0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // 0x1p-149 + -0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // -0x1p-149 + 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0xff7fffff), 0x00000000, STR(__LINE__)); // 0x1.fffffep+127 + -0x1.fffffep+127 = 0x0p+0
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0x7f7fffff), 0x00000000, STR(__LINE__)); // -0x1.fffffep+127 + 0x1.fffffep+127 = 0x0p+0
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0xbf800000), 0x34000000, STR(__LINE__)); // 0x1.000002p+0 + -0x1p+0 = 0x1p-23
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x3f800001), 0x34000000, STR(__LINE__)); // -0x1p+0 + 0x1.000002p+0 = 0x1p-23
+ comp32(single_of_bits(0x7f000001) + single_of_bits(0xff000000), 0x73800000, STR(__LINE__)); // 0x1.000002p+127 + -0x1p+127 = 0x1p+104
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x7f000001), 0x73800000, STR(__LINE__)); // -0x1p+127 + 0x1.000002p+127 = 0x1p+104
+ comp32(single_of_bits(0x00800001) + single_of_bits(0x80800000), 0x00000001, STR(__LINE__)); // 0x1.000002p-126 + -0x1p-126 = 0x1p-149
+}
+
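+/* The following cases subtract nearly equal values; because each pair lies
+   within a factor of two of each other, the difference is exact (Sterbenz's
+   lemma), and the expected result is the exact gap, often a single ulp. */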
+void f342(void) {
+ comp32(single_of_bits(0x80800000) + single_of_bits(0x00800001), 0x00000001, STR(__LINE__)); // -0x1p-126 + 0x1.000002p-126 = 0x1p-149
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0x40000001), 0x34800000, STR(__LINE__)); // -0x1p+1 + 0x1.000002p+1 = 0x1p-22
+ comp32(single_of_bits(0x40000001) + single_of_bits(0xc0000000), 0x34800000, STR(__LINE__)); // 0x1.000002p+1 + -0x1p+1 = 0x1p-22
+ comp32(single_of_bits(0xfe800000) + single_of_bits(0x7e800001), 0x73000000, STR(__LINE__)); // -0x1p+126 + 0x1.000002p+126 = 0x1p+103
+ comp32(single_of_bits(0x7e800001) + single_of_bits(0xfe800000), 0x73000000, STR(__LINE__)); // 0x1.000002p+126 + -0x1p+126 = 0x1p+103
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0x3f800000), 0xb4000000, STR(__LINE__)); // -0x1.000002p+0 + 0x1p+0 = -0x1p-23
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xbf800001), 0xb4000000, STR(__LINE__)); // 0x1p+0 + -0x1.000002p+0 = -0x1p-23
+ comp32(single_of_bits(0xff000001) + single_of_bits(0x7f000000), 0xf3800000, STR(__LINE__)); // -0x1.000002p+127 + 0x1p+127 = -0x1p+104
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0xff000001), 0xf3800000, STR(__LINE__)); // 0x1p+127 + -0x1.000002p+127 = -0x1p+104
+ comp32(single_of_bits(0x80800001) + single_of_bits(0x00800000), 0x80000001, STR(__LINE__)); // -0x1.000002p-126 + 0x1p-126 = -0x1p-149
+}
+
+void f343(void) {
+ comp32(single_of_bits(0x00800000) + single_of_bits(0x80800001), 0x80000001, STR(__LINE__)); // 0x1p-126 + -0x1.000002p-126 = -0x1p-149
+ comp32(single_of_bits(0x40000000) + single_of_bits(0xc0000001), 0xb4800000, STR(__LINE__)); // 0x1p+1 + -0x1.000002p+1 = -0x1p-22
+ comp32(single_of_bits(0xc0000001) + single_of_bits(0x40000000), 0xb4800000, STR(__LINE__)); // -0x1.000002p+1 + 0x1p+1 = -0x1p-22
+ comp32(single_of_bits(0x7e800000) + single_of_bits(0xfe800001), 0xf3000000, STR(__LINE__)); // 0x1p+126 + -0x1.000002p+126 = -0x1p+103
+ comp32(single_of_bits(0xfe800001) + single_of_bits(0x7e800000), 0xf3000000, STR(__LINE__)); // -0x1.000002p+126 + 0x1p+126 = -0x1p+103
+ comp32(single_of_bits(0x00000002) + single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // 0x1p-148 + -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x00000002), 0x00000001, STR(__LINE__)); // -0x1p-149 + 0x1p-148 = 0x1p-149
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0x3f800002), 0x34000000, STR(__LINE__)); // -0x1.000002p+0 + 0x1.000004p+0 = 0x1p-23
+ comp32(single_of_bits(0x3f800002) + single_of_bits(0xbf800001), 0x34000000, STR(__LINE__)); // 0x1.000004p+0 + -0x1.000002p+0 = 0x1p-23
+ comp32(single_of_bits(0xff000001) + single_of_bits(0x7f000002), 0x73800000, STR(__LINE__)); // -0x1.000002p+127 + 0x1.000004p+127 = 0x1p+104
+}
+
+void f344(void) {
+ comp32(single_of_bits(0x7f000002) + single_of_bits(0xff000001), 0x73800000, STR(__LINE__)); // 0x1.000004p+127 + -0x1.000002p+127 = 0x1p+104
+ comp32(single_of_bits(0x80800001) + single_of_bits(0x00800002), 0x00000001, STR(__LINE__)); // -0x1.000002p-126 + 0x1.000004p-126 = 0x1p-149
+ comp32(single_of_bits(0x00800002) + single_of_bits(0x80800001), 0x00000001, STR(__LINE__)); // 0x1.000004p-126 + -0x1.000002p-126 = 0x1p-149
+ comp32(single_of_bits(0x80000002) + single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1p-148 + 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x80000002), 0x80000001, STR(__LINE__)); // 0x1p-149 + -0x1p-148 = -0x1p-149
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0xbf800002), 0xb4000000, STR(__LINE__)); // 0x1.000002p+0 + -0x1.000004p+0 = -0x1p-23
+ comp32(single_of_bits(0xbf800002) + single_of_bits(0x3f800001), 0xb4000000, STR(__LINE__)); // -0x1.000004p+0 + 0x1.000002p+0 = -0x1p-23
+ comp32(single_of_bits(0x7f000001) + single_of_bits(0xff000002), 0xf3800000, STR(__LINE__)); // 0x1.000002p+127 + -0x1.000004p+127 = -0x1p+104
+ comp32(single_of_bits(0xff000002) + single_of_bits(0x7f000001), 0xf3800000, STR(__LINE__)); // -0x1.000004p+127 + 0x1.000002p+127 = -0x1p+104
+ comp32(single_of_bits(0x00800001) + single_of_bits(0x80800002), 0x80000001, STR(__LINE__)); // 0x1.000002p-126 + -0x1.000004p-126 = -0x1p-149
+}
+
+void f345(void) {
+ comp32(single_of_bits(0x80800002) + single_of_bits(0x00800001), 0x80000001, STR(__LINE__)); // -0x1.000004p-126 + 0x1.000002p-126 = -0x1p-149
+ comp32(single_of_bits(0x80000003) + single_of_bits(0x00000002), 0x80000001, STR(__LINE__)); // -0x1.8p-148 + 0x1p-148 = -0x1p-149
+ comp32(single_of_bits(0x00000002) + single_of_bits(0x80000003), 0x80000001, STR(__LINE__)); // 0x1p-148 + -0x1.8p-148 = -0x1p-149
+ comp32(single_of_bits(0x00000003) + single_of_bits(0x80000002), 0x00000001, STR(__LINE__)); // 0x1.8p-148 + -0x1p-148 = 0x1p-149
+ comp32(single_of_bits(0x80000002) + single_of_bits(0x00000003), 0x00000001, STR(__LINE__)); // -0x1p-148 + 0x1.8p-148 = 0x1p-149
+ comp32(single_of_bits(0x40000004) + single_of_bits(0xc0000003), 0x34800000, STR(__LINE__)); // 0x1.000008p+1 + -0x1.000006p+1 = 0x1p-22
+ comp32(single_of_bits(0xc0000003) + single_of_bits(0x40000004), 0x34800000, STR(__LINE__)); // -0x1.000006p+1 + 0x1.000008p+1 = 0x1p-22
+ comp32(single_of_bits(0x7e800004) + single_of_bits(0xfe800003), 0x73000000, STR(__LINE__)); // 0x1.000008p+126 + -0x1.000006p+126 = 0x1p+103
+ comp32(single_of_bits(0xfe800003) + single_of_bits(0x7e800004), 0x73000000, STR(__LINE__)); // -0x1.000006p+126 + 0x1.000008p+126 = 0x1p+103
+ comp32(single_of_bits(0xc0000004) + single_of_bits(0x40000003), 0xb4800000, STR(__LINE__)); // -0x1.000008p+1 + 0x1.000006p+1 = -0x1p-22
+}
+
+void f346(void) {
+ comp32(single_of_bits(0x40000003) + single_of_bits(0xc0000004), 0xb4800000, STR(__LINE__)); // 0x1.000006p+1 + -0x1.000008p+1 = -0x1p-22
+ comp32(single_of_bits(0xfe800004) + single_of_bits(0x7e800003), 0xf3000000, STR(__LINE__)); // -0x1.000008p+126 + 0x1.000006p+126 = -0x1p+103
+ comp32(single_of_bits(0x7e800003) + single_of_bits(0xfe800004), 0xf3000000, STR(__LINE__)); // 0x1.000006p+126 + -0x1.000008p+126 = -0x1p+103
+ comp32(single_of_bits(0x407fffff) + single_of_bits(0xc07ffffe), 0x34800000, STR(__LINE__)); // 0x1.fffffep+1 + -0x1.fffffcp+1 = 0x1p-22
+ comp32(single_of_bits(0xc07ffffe) + single_of_bits(0x407fffff), 0x34800000, STR(__LINE__)); // -0x1.fffffcp+1 + 0x1.fffffep+1 = 0x1p-22
+ comp32(single_of_bits(0x7e7fffff) + single_of_bits(0xfe7ffffe), 0x72800000, STR(__LINE__)); // 0x1.fffffep+125 + -0x1.fffffcp+125 = 0x1p+102
+ comp32(single_of_bits(0xfe7ffffe) + single_of_bits(0x7e7fffff), 0x72800000, STR(__LINE__)); // -0x1.fffffcp+125 + 0x1.fffffep+125 = 0x1p+102
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x807ffffe), 0x00000001, STR(__LINE__)); // 0x1.fffffcp-127 + -0x1.fffff8p-127 = 0x1p-149
+ comp32(single_of_bits(0x807ffffe) + single_of_bits(0x007fffff), 0x00000001, STR(__LINE__)); // -0x1.fffff8p-127 + 0x1.fffffcp-127 = 0x1p-149
+ comp32(single_of_bits(0xff7ffffe) + single_of_bits(0x7f7fffff), 0x73800000, STR(__LINE__)); // -0x1.fffffcp+127 + 0x1.fffffep+127 = 0x1p+104
+}
+
+void f347(void) {
+ comp32(single_of_bits(0x7f7fffff) + single_of_bits(0xff7ffffe), 0x73800000, STR(__LINE__)); // 0x1.fffffep+127 + -0x1.fffffcp+127 = 0x1p+104
+ comp32(single_of_bits(0xc07fffff) + single_of_bits(0x407ffffe), 0xb4800000, STR(__LINE__)); // -0x1.fffffep+1 + 0x1.fffffcp+1 = -0x1p-22
+ comp32(single_of_bits(0x407ffffe) + single_of_bits(0xc07fffff), 0xb4800000, STR(__LINE__)); // 0x1.fffffcp+1 + -0x1.fffffep+1 = -0x1p-22
+ comp32(single_of_bits(0xfe7fffff) + single_of_bits(0x7e7ffffe), 0xf2800000, STR(__LINE__)); // -0x1.fffffep+125 + 0x1.fffffcp+125 = -0x1p+102
+ comp32(single_of_bits(0x7e7ffffe) + single_of_bits(0xfe7fffff), 0xf2800000, STR(__LINE__)); // 0x1.fffffcp+125 + -0x1.fffffep+125 = -0x1p+102
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x007ffffe), 0x80000001, STR(__LINE__)); // -0x1.fffffcp-127 + 0x1.fffff8p-127 = -0x1p-149
+ comp32(single_of_bits(0x007ffffe) + single_of_bits(0x807fffff), 0x80000001, STR(__LINE__)); // 0x1.fffff8p-127 + -0x1.fffffcp-127 = -0x1p-149
+ comp32(single_of_bits(0x7f7ffffe) + single_of_bits(0xff7fffff), 0xf3800000, STR(__LINE__)); // 0x1.fffffcp+127 + -0x1.fffffep+127 = -0x1p+104
+ comp32(single_of_bits(0xff7fffff) + single_of_bits(0x7f7ffffe), 0xf3800000, STR(__LINE__)); // -0x1.fffffep+127 + 0x1.fffffcp+127 = -0x1p+104
+ comp32(single_of_bits(0x007ffffd) + single_of_bits(0x807ffffe), 0x80000001, STR(__LINE__)); // 0x1.fffff4p-127 + -0x1.fffff8p-127 = -0x1p-149
+}
+
+void f348(void) {
+ comp32(single_of_bits(0x807ffffe) + single_of_bits(0x007ffffd), 0x80000001, STR(__LINE__)); // -0x1.fffff8p-127 + 0x1.fffff4p-127 = -0x1p-149
+ comp32(single_of_bits(0x807ffffd) + single_of_bits(0x007ffffe), 0x00000001, STR(__LINE__)); // -0x1.fffff4p-127 + 0x1.fffff8p-127 = 0x1p-149
+ comp32(single_of_bits(0x007ffffe) + single_of_bits(0x807ffffd), 0x00000001, STR(__LINE__)); // 0x1.fffff8p-127 + -0x1.fffff4p-127 = 0x1p-149
+ comp32(single_of_bits(0x3ffffffc) + single_of_bits(0xbffffffd), 0xb4000000, STR(__LINE__)); // 0x1.fffff8p+0 + -0x1.fffffap+0 = -0x1p-23
+ comp32(single_of_bits(0xbffffffd) + single_of_bits(0x3ffffffc), 0xb4000000, STR(__LINE__)); // -0x1.fffffap+0 + 0x1.fffff8p+0 = -0x1p-23
+ comp32(single_of_bits(0xbffffffc) + single_of_bits(0x3ffffffd), 0x34000000, STR(__LINE__)); // -0x1.fffff8p+0 + 0x1.fffffap+0 = 0x1p-23
+ comp32(single_of_bits(0x3ffffffd) + single_of_bits(0xbffffffc), 0x34000000, STR(__LINE__)); // 0x1.fffffap+0 + -0x1.fffff8p+0 = 0x1p-23
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x80800000), 0x80000001, STR(__LINE__)); // 0x1.fffffcp-127 + -0x1p-126 = -0x1p-149
+ comp32(single_of_bits(0x80800000) + single_of_bits(0x007fffff), 0x80000001, STR(__LINE__)); // -0x1p-126 + 0x1.fffffcp-127 = -0x1p-149
+ comp32(single_of_bits(0x3fffffff) + single_of_bits(0xc0000000), 0xb4000000, STR(__LINE__)); // 0x1.fffffep+0 + -0x1p+1 = -0x1p-23
+}
+
+void f349(void) {
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0x3fffffff), 0xb4000000, STR(__LINE__)); // -0x1p+1 + 0x1.fffffep+0 = -0x1p-23
+ comp32(single_of_bits(0x017fffff) + single_of_bits(0x81800000), 0x80000002, STR(__LINE__)); // 0x1.fffffep-125 + -0x1p-124 = -0x1p-148
+ comp32(single_of_bits(0x81800000) + single_of_bits(0x017fffff), 0x80000002, STR(__LINE__)); // -0x1p-124 + 0x1.fffffep-125 = -0x1p-148
+ comp32(single_of_bits(0x7effffff) + single_of_bits(0xff000000), 0xf3000000, STR(__LINE__)); // 0x1.fffffep+126 + -0x1p+127 = -0x1p+103
+ comp32(single_of_bits(0xff000000) + single_of_bits(0x7effffff), 0xf3000000, STR(__LINE__)); // -0x1p+127 + 0x1.fffffep+126 = -0x1p+103
+ comp32(single_of_bits(0x00ffffff) + single_of_bits(0x81000000), 0x80000001, STR(__LINE__)); // 0x1.fffffep-126 + -0x1p-125 = -0x1p-149
+ comp32(single_of_bits(0x81000000) + single_of_bits(0x00ffffff), 0x80000001, STR(__LINE__)); // -0x1p-125 + 0x1.fffffep-126 = -0x1p-149
+ comp32(single_of_bits(0xfe800000) + single_of_bits(0x7e7fffff), 0xf2800000, STR(__LINE__)); // -0x1p+126 + 0x1.fffffep+125 = -0x1p+102
+ comp32(single_of_bits(0x7e7fffff) + single_of_bits(0xfe800000), 0xf2800000, STR(__LINE__)); // 0x1.fffffep+125 + -0x1p+126 = -0x1p+102
+ comp32(single_of_bits(0x40000000) + single_of_bits(0xbfffffff), 0x34000000, STR(__LINE__)); // 0x1p+1 + -0x1.fffffep+0 = 0x1p-23
+}
+
+void f350(void) {
+ comp32(single_of_bits(0xbfffffff) + single_of_bits(0x40000000), 0x34000000, STR(__LINE__)); // -0x1.fffffep+0 + 0x1p+1 = 0x1p-23
+ comp32(single_of_bits(0x7e800000) + single_of_bits(0xfe7fffff), 0x72800000, STR(__LINE__)); // 0x1p+126 + -0x1.fffffep+125 = 0x1p+102
+ comp32(single_of_bits(0xfe7fffff) + single_of_bits(0x7e800000), 0x72800000, STR(__LINE__)); // -0x1.fffffep+125 + 0x1p+126 = 0x1p+102
+ comp32(single_of_bits(0x01000000) + single_of_bits(0x80ffffff), 0x00000001, STR(__LINE__)); // 0x1p-125 + -0x1.fffffep-126 = 0x1p-149
+ comp32(single_of_bits(0x80ffffff) + single_of_bits(0x01000000), 0x00000001, STR(__LINE__)); // -0x1.fffffep-126 + 0x1p-125 = 0x1p-149
+ comp32(single_of_bits(0x01800000) + single_of_bits(0x817fffff), 0x00000002, STR(__LINE__)); // 0x1p-124 + -0x1.fffffep-125 = 0x1p-148
+ comp32(single_of_bits(0x817fffff) + single_of_bits(0x01800000), 0x00000002, STR(__LINE__)); // -0x1.fffffep-125 + 0x1p-124 = 0x1p-148
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x00800000), 0x00000001, STR(__LINE__)); // -0x1.fffffcp-127 + 0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x00800000) + single_of_bits(0x807fffff), 0x00000001, STR(__LINE__)); // 0x1p-126 + -0x1.fffffcp-127 = 0x1p-149
+ comp32(single_of_bits(0xfeffffff) + single_of_bits(0x7f000000), 0x73000000, STR(__LINE__)); // -0x1.fffffep+126 + 0x1p+127 = 0x1p+103
+}
+
+void f351(void) {
+ comp32(single_of_bits(0x7f000000) + single_of_bits(0xfeffffff), 0x73000000, STR(__LINE__)); // 0x1p+127 + -0x1.fffffep+126 = 0x1p+103
+ comp32(single_of_bits(0x407fffff) + single_of_bits(0xc0800001), 0xb5400000, STR(__LINE__)); // 0x1.fffffep+1 + -0x1.000002p+2 = -0x1.8p-21
+ comp32(single_of_bits(0xc0800001) + single_of_bits(0x407fffff), 0xb5400000, STR(__LINE__)); // -0x1.000002p+2 + 0x1.fffffep+1 = -0x1.8p-21
+ comp32(single_of_bits(0xfd800001) + single_of_bits(0x7d7fffff), 0xf2400000, STR(__LINE__)); // -0x1.000002p+124 + 0x1.fffffep+123 = -0x1.8p+101
+ comp32(single_of_bits(0x7d7fffff) + single_of_bits(0xfd800001), 0xf2400000, STR(__LINE__)); // 0x1.fffffep+123 + -0x1.000002p+124 = -0x1.8p+101
+ comp32(single_of_bits(0x81000001) + single_of_bits(0x00ffffff), 0x80000003, STR(__LINE__)); // -0x1.000002p-125 + 0x1.fffffep-126 = -0x1.8p-148
+ comp32(single_of_bits(0x00ffffff) + single_of_bits(0x81000001), 0x80000003, STR(__LINE__)); // 0x1.fffffep-126 + -0x1.000002p-125 = -0x1.8p-148
+ comp32(single_of_bits(0x81800001) + single_of_bits(0x017fffff), 0x80000006, STR(__LINE__)); // -0x1.000002p-124 + 0x1.fffffep-125 = -0x1.8p-147
+ comp32(single_of_bits(0x017fffff) + single_of_bits(0x81800001), 0x80000006, STR(__LINE__)); // 0x1.fffffep-125 + -0x1.000002p-124 = -0x1.8p-147
+ comp32(single_of_bits(0xc07fffff) + single_of_bits(0x40800001), 0x35400000, STR(__LINE__)); // -0x1.fffffep+1 + 0x1.000002p+2 = 0x1.8p-21
+}
+
+void f352(void) {
+ comp32(single_of_bits(0x40800001) + single_of_bits(0xc07fffff), 0x35400000, STR(__LINE__)); // 0x1.000002p+2 + -0x1.fffffep+1 = 0x1.8p-21
+ comp32(single_of_bits(0x7d800001) + single_of_bits(0xfd7fffff), 0x72400000, STR(__LINE__)); // 0x1.000002p+124 + -0x1.fffffep+123 = 0x1.8p+101
+ comp32(single_of_bits(0xfd7fffff) + single_of_bits(0x7d800001), 0x72400000, STR(__LINE__)); // -0x1.fffffep+123 + 0x1.000002p+124 = 0x1.8p+101
+ comp32(single_of_bits(0x01000001) + single_of_bits(0x80ffffff), 0x00000003, STR(__LINE__)); // 0x1.000002p-125 + -0x1.fffffep-126 = 0x1.8p-148
+ comp32(single_of_bits(0x80ffffff) + single_of_bits(0x01000001), 0x00000003, STR(__LINE__)); // -0x1.fffffep-126 + 0x1.000002p-125 = 0x1.8p-148
+ comp32(single_of_bits(0x01800001) + single_of_bits(0x817fffff), 0x00000006, STR(__LINE__)); // 0x1.000002p-124 + -0x1.fffffep-125 = 0x1.8p-147
+ comp32(single_of_bits(0x817fffff) + single_of_bits(0x01800001), 0x00000006, STR(__LINE__)); // -0x1.fffffep-125 + 0x1.000002p-124 = 0x1.8p-147
+ comp32(single_of_bits(0x407fffff) + single_of_bits(0xc0800002), 0xb5a00000, STR(__LINE__)); // 0x1.fffffep+1 + -0x1.000004p+2 = -0x1.4p-20
+ comp32(single_of_bits(0xc0800002) + single_of_bits(0x407fffff), 0xb5a00000, STR(__LINE__)); // -0x1.000004p+2 + 0x1.fffffep+1 = -0x1.4p-20
+ comp32(single_of_bits(0x7e7fffff) + single_of_bits(0xfe800002), 0xf3a00000, STR(__LINE__)); // 0x1.fffffep+125 + -0x1.000004p+126 = -0x1.4p+104
+}
+
+void f353(void) {
+ comp32(single_of_bits(0xfe800002) + single_of_bits(0x7e7fffff), 0xf3a00000, STR(__LINE__)); // -0x1.000004p+126 + 0x1.fffffep+125 = -0x1.4p+104
+ comp32(single_of_bits(0x00ffffff) + single_of_bits(0x81000002), 0x80000005, STR(__LINE__)); // 0x1.fffffep-126 + -0x1.000004p-125 = -0x1.4p-147
+ comp32(single_of_bits(0x81000002) + single_of_bits(0x00ffffff), 0x80000005, STR(__LINE__)); // -0x1.000004p-125 + 0x1.fffffep-126 = -0x1.4p-147
+ comp32(single_of_bits(0xc07fffff) + single_of_bits(0x40800002), 0x35a00000, STR(__LINE__)); // -0x1.fffffep+1 + 0x1.000004p+2 = 0x1.4p-20
+ comp32(single_of_bits(0x40800002) + single_of_bits(0xc07fffff), 0x35a00000, STR(__LINE__)); // 0x1.000004p+2 + -0x1.fffffep+1 = 0x1.4p-20
+ comp32(single_of_bits(0xfe7fffff) + single_of_bits(0x7e800002), 0x73a00000, STR(__LINE__)); // -0x1.fffffep+125 + 0x1.000004p+126 = 0x1.4p+104
+ comp32(single_of_bits(0x7e800002) + single_of_bits(0xfe7fffff), 0x73a00000, STR(__LINE__)); // 0x1.000004p+126 + -0x1.fffffep+125 = 0x1.4p+104
+ comp32(single_of_bits(0x80ffffff) + single_of_bits(0x01000002), 0x00000005, STR(__LINE__)); // -0x1.fffffep-126 + 0x1.000004p-125 = 0x1.4p-147
+ comp32(single_of_bits(0x01000002) + single_of_bits(0x80ffffff), 0x00000005, STR(__LINE__)); // 0x1.000004p-125 + -0x1.fffffep-126 = 0x1.4p-147
+ comp32(single_of_bits(0x00ffffff) + single_of_bits(0x81000004), 0x80000009, STR(__LINE__)); // 0x1.fffffep-126 + -0x1.000008p-125 = -0x1.2p-146
+}
+
+void f354(void) {
+ comp32(single_of_bits(0x81000004) + single_of_bits(0x00ffffff), 0x80000009, STR(__LINE__)); // -0x1.000008p-125 + 0x1.fffffep-126 = -0x1.2p-146
+ comp32(single_of_bits(0x80ffffff) + single_of_bits(0x01000004), 0x00000009, STR(__LINE__)); // -0x1.fffffep-126 + 0x1.000008p-125 = 0x1.2p-146
+ comp32(single_of_bits(0x01000004) + single_of_bits(0x80ffffff), 0x00000009, STR(__LINE__)); // 0x1.000008p-125 + -0x1.fffffep-126 = 0x1.2p-146
+ comp32(single_of_bits(0x40000001) + single_of_bits(0xbf800001), 0x3f800001, STR(__LINE__)); // 0x1.000002p+1 + -0x1.000002p+0 = 0x1.000002p+0
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0x40000001), 0x3f800001, STR(__LINE__)); // -0x1.000002p+0 + 0x1.000002p+1 = 0x1.000002p+0
+ comp32(single_of_bits(0x01000001) + single_of_bits(0x80800001), 0x00800001, STR(__LINE__)); // 0x1.000002p-125 + -0x1.000002p-126 = 0x1.000002p-126
+ comp32(single_of_bits(0x80800001) + single_of_bits(0x01000001), 0x00800001, STR(__LINE__)); // -0x1.000002p-126 + 0x1.000002p-125 = 0x1.000002p-126
+ comp32(single_of_bits(0xc0000001) + single_of_bits(0x3f800001), 0xbf800001, STR(__LINE__)); // -0x1.000002p+1 + 0x1.000002p+0 = -0x1.000002p+0
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0xc0000001), 0xbf800001, STR(__LINE__)); // 0x1.000002p+0 + -0x1.000002p+1 = -0x1.000002p+0
+ comp32(single_of_bits(0x81000001) + single_of_bits(0x00800001), 0x80800001, STR(__LINE__)); // -0x1.000002p-125 + 0x1.000002p-126 = -0x1.000002p-126
+}
+
+void f355(void) {
+ comp32(single_of_bits(0x00800001) + single_of_bits(0x81000001), 0x80800001, STR(__LINE__)); // 0x1.000002p-126 + -0x1.000002p-125 = -0x1.000002p-126
+ comp32(single_of_bits(0xfe800001) + single_of_bits(0x7f000001), 0x7e800001, STR(__LINE__)); // -0x1.000002p+126 + 0x1.000002p+127 = 0x1.000002p+126
+ comp32(single_of_bits(0x7f000001) + single_of_bits(0xfe800001), 0x7e800001, STR(__LINE__)); // 0x1.000002p+127 + -0x1.000002p+126 = 0x1.000002p+126
+ comp32(single_of_bits(0x7e800001) + single_of_bits(0xff000001), 0xfe800001, STR(__LINE__)); // 0x1.000002p+126 + -0x1.000002p+127 = -0x1.000002p+126
+ comp32(single_of_bits(0xff000001) + single_of_bits(0x7e800001), 0xfe800001, STR(__LINE__)); // -0x1.000002p+127 + 0x1.000002p+126 = -0x1.000002p+126
+ comp32(single_of_bits(0x40000002) + single_of_bits(0xbf800001), 0x3f800003, STR(__LINE__)); // 0x1.000004p+1 + -0x1.000002p+0 = 0x1.000006p+0
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0x40000002), 0x3f800003, STR(__LINE__)); // -0x1.000002p+0 + 0x1.000004p+1 = 0x1.000006p+0
+ comp32(single_of_bits(0x7f000002) + single_of_bits(0xfe800001), 0x7e800003, STR(__LINE__)); // 0x1.000004p+127 + -0x1.000002p+126 = 0x1.000006p+126
+ comp32(single_of_bits(0xfe800001) + single_of_bits(0x7f000002), 0x7e800003, STR(__LINE__)); // -0x1.000002p+126 + 0x1.000004p+127 = 0x1.000006p+126
+ comp32(single_of_bits(0x01000002) + single_of_bits(0x80800001), 0x00800003, STR(__LINE__)); // 0x1.000004p-125 + -0x1.000002p-126 = 0x1.000006p-126
+}
+
+void f356(void) {
+ comp32(single_of_bits(0x80800001) + single_of_bits(0x01000002), 0x00800003, STR(__LINE__)); // -0x1.000002p-126 + 0x1.000004p-125 = 0x1.000006p-126
+ comp32(single_of_bits(0xc0000002) + single_of_bits(0x3f800001), 0xbf800003, STR(__LINE__)); // -0x1.000004p+1 + 0x1.000002p+0 = -0x1.000006p+0
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0xc0000002), 0xbf800003, STR(__LINE__)); // 0x1.000002p+0 + -0x1.000004p+1 = -0x1.000006p+0
+ comp32(single_of_bits(0xff000002) + single_of_bits(0x7e800001), 0xfe800003, STR(__LINE__)); // -0x1.000004p+127 + 0x1.000002p+126 = -0x1.000006p+126
+ comp32(single_of_bits(0x7e800001) + single_of_bits(0xff000002), 0xfe800003, STR(__LINE__)); // 0x1.000002p+126 + -0x1.000004p+127 = -0x1.000006p+126
+ comp32(single_of_bits(0x81000002) + single_of_bits(0x00800001), 0x80800003, STR(__LINE__)); // -0x1.000004p-125 + 0x1.000002p-126 = -0x1.000006p-126
+ comp32(single_of_bits(0x00800001) + single_of_bits(0x81000002), 0x80800003, STR(__LINE__)); // 0x1.000002p-126 + -0x1.000004p-125 = -0x1.000006p-126
+ comp32(single_of_bits(0x40000002) + single_of_bits(0xbf800003), 0x3f800001, STR(__LINE__)); // 0x1.000004p+1 + -0x1.000006p+0 = 0x1.000002p+0
+ comp32(single_of_bits(0xbf800003) + single_of_bits(0x40000002), 0x3f800001, STR(__LINE__)); // -0x1.000006p+0 + 0x1.000004p+1 = 0x1.000002p+0
+ comp32(single_of_bits(0x7e800002) + single_of_bits(0xfe000003), 0x7e000001, STR(__LINE__)); // 0x1.000004p+126 + -0x1.000006p+125 = 0x1.000002p+125
+}
+
+void f357(void) {
+ comp32(single_of_bits(0xfe000003) + single_of_bits(0x7e800002), 0x7e000001, STR(__LINE__)); // -0x1.000006p+125 + 0x1.000004p+126 = 0x1.000002p+125
+ comp32(single_of_bits(0x01800002) + single_of_bits(0x81000003), 0x01000001, STR(__LINE__)); // 0x1.000004p-124 + -0x1.000006p-125 = 0x1.000002p-125
+ comp32(single_of_bits(0x81000003) + single_of_bits(0x01800002), 0x01000001, STR(__LINE__)); // -0x1.000006p-125 + 0x1.000004p-124 = 0x1.000002p-125
+ comp32(single_of_bits(0xc0000002) + single_of_bits(0x3f800003), 0xbf800001, STR(__LINE__)); // -0x1.000004p+1 + 0x1.000006p+0 = -0x1.000002p+0
+ comp32(single_of_bits(0x3f800003) + single_of_bits(0xc0000002), 0xbf800001, STR(__LINE__)); // 0x1.000006p+0 + -0x1.000004p+1 = -0x1.000002p+0
+ comp32(single_of_bits(0xfe800002) + single_of_bits(0x7e000003), 0xfe000001, STR(__LINE__)); // -0x1.000004p+126 + 0x1.000006p+125 = -0x1.000002p+125
+ comp32(single_of_bits(0x7e000003) + single_of_bits(0xfe800002), 0xfe000001, STR(__LINE__)); // 0x1.000006p+125 + -0x1.000004p+126 = -0x1.000002p+125
+ comp32(single_of_bits(0x81800002) + single_of_bits(0x01000003), 0x81000001, STR(__LINE__)); // -0x1.000004p-124 + 0x1.000006p-125 = -0x1.000002p-125
+ comp32(single_of_bits(0x01000003) + single_of_bits(0x81800002), 0x81000001, STR(__LINE__)); // 0x1.000006p-125 + -0x1.000004p-124 = -0x1.000002p-125
+ comp32(single_of_bits(0x3f800002) + single_of_bits(0xbf800000), 0x34800000, STR(__LINE__)); // 0x1.000004p+0 + -0x1p+0 = 0x1p-22
+}
+
+void f358(void) {
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x3f800002), 0x34800000, STR(__LINE__)); // -0x1p+0 + 0x1.000004p+0 = 0x1p-22
+ comp32(single_of_bits(0xbf800002) + single_of_bits(0x3f800000), 0xb4800000, STR(__LINE__)); // -0x1.000004p+0 + 0x1p+0 = -0x1p-22
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xbf800002), 0xb4800000, STR(__LINE__)); // 0x1p+0 + -0x1.000004p+0 = -0x1p-22
+ comp32(single_of_bits(0x3f800004) + single_of_bits(0xbf800000), 0x35000000, STR(__LINE__)); // 0x1.000008p+0 + -0x1p+0 = 0x1p-21
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x3f800004), 0x35000000, STR(__LINE__)); // -0x1p+0 + 0x1.000008p+0 = 0x1p-21
+ comp32(single_of_bits(0xbf800004) + single_of_bits(0x3f800000), 0xb5000000, STR(__LINE__)); // -0x1.000008p+0 + 0x1p+0 = -0x1p-21
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xbf800004), 0xb5000000, STR(__LINE__)); // 0x1p+0 + -0x1.000008p+0 = -0x1p-21
+ comp32(single_of_bits(0x3f800008) + single_of_bits(0xbf800000), 0x35800000, STR(__LINE__)); // 0x1.00001p+0 + -0x1p+0 = 0x1p-20
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0x3f800008), 0x35800000, STR(__LINE__)); // -0x1p+0 + 0x1.00001p+0 = 0x1p-20
+ comp32(single_of_bits(0xbf800008) + single_of_bits(0x3f800000), 0xb5800000, STR(__LINE__)); // -0x1.00001p+0 + 0x1p+0 = -0x1p-20
+}
+
+void f359(void) {
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0xbf800008), 0xb5800000, STR(__LINE__)); // 0x1p+0 + -0x1.00001p+0 = -0x1p-20
+ comp32(single_of_bits(0x41880000) + single_of_bits(0xc1800000), 0x3f800000, STR(__LINE__)); // 0x1.1p+4 + -0x1p+4 = 0x1p+0
+ comp32(single_of_bits(0xc1800000) + single_of_bits(0x41880000), 0x3f800000, STR(__LINE__)); // -0x1p+4 + 0x1.1p+4 = 0x1p+0
+ comp32(single_of_bits(0xc1880000) + single_of_bits(0x41800000), 0xbf800000, STR(__LINE__)); // -0x1.1p+4 + 0x1p+4 = -0x1p+0
+ comp32(single_of_bits(0x41800000) + single_of_bits(0xc1880000), 0xbf800000, STR(__LINE__)); // 0x1p+4 + -0x1.1p+4 = -0x1p+0
+ comp32(single_of_bits(0x41100000) + single_of_bits(0xc1000000), 0x3f800000, STR(__LINE__)); // 0x1.2p+3 + -0x1p+3 = 0x1p+0
+ comp32(single_of_bits(0xc1000000) + single_of_bits(0x41100000), 0x3f800000, STR(__LINE__)); // -0x1p+3 + 0x1.2p+3 = 0x1p+0
+ comp32(single_of_bits(0xc1100000) + single_of_bits(0x41000000), 0xbf800000, STR(__LINE__)); // -0x1.2p+3 + 0x1p+3 = -0x1p+0
+}
+
+void f360(void) {
+ comp32(single_of_bits(0x41000000) + single_of_bits(0xc1100000), 0xbf800000, STR(__LINE__)); // 0x1p+3 + -0x1.2p+3 = -0x1p+0
+ comp32(single_of_bits(0x40a00000) + single_of_bits(0xc0800000), 0x3f800000, STR(__LINE__)); // 0x1.4p+2 + -0x1p+2 = 0x1p+0
+ comp32(single_of_bits(0xc0800000) + single_of_bits(0x40a00000), 0x3f800000, STR(__LINE__)); // -0x1p+2 + 0x1.4p+2 = 0x1p+0
+ comp32(single_of_bits(0xc0a00000) + single_of_bits(0x40800000), 0xbf800000, STR(__LINE__)); // -0x1.4p+2 + 0x1p+2 = -0x1p+0
+ comp32(single_of_bits(0x40800000) + single_of_bits(0xc0a00000), 0xbf800000, STR(__LINE__)); // 0x1p+2 + -0x1.4p+2 = -0x1p+0
+ comp32(single_of_bits(0x40400000) + single_of_bits(0xc0000000), 0x3f800000, STR(__LINE__)); // 0x1.8p+1 + -0x1p+1 = 0x1p+0
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0x40400000), 0x3f800000, STR(__LINE__)); // -0x1p+1 + 0x1.8p+1 = 0x1p+0
+ comp32(single_of_bits(0xc0400000) + single_of_bits(0x40000000), 0xbf800000, STR(__LINE__)); // -0x1.8p+1 + 0x1p+1 = -0x1p+0
+ comp32(single_of_bits(0x40000000) + single_of_bits(0xc0400000), 0xbf800000, STR(__LINE__)); // 0x1p+1 + -0x1.8p+1 = -0x1p+0
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x3f800001), 0x40400000, STR(__LINE__)); // 0x1p+1 + 0x1.000002p+0 = 0x1.8p+1
+}
+
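+/* The cases below add 0x1.000002p+0 to values of increasing exponent, checking
+   that the low-order bits of the smaller addend are rounded off correctly as
+   the exponent difference grows. */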
+void f361(void) {
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x40000000), 0x40400000, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+1 = 0x1.8p+1
+ comp32(single_of_bits(0x41200000) + single_of_bits(0x3f800001), 0x41300000, STR(__LINE__)); // 0x1.4p+3 + 0x1.000002p+0 = 0x1.6p+3
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x41200000), 0x41300000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.4p+3 = 0x1.6p+3
+ comp32(single_of_bits(0x41980000) + single_of_bits(0x3f800001), 0x41a00000, STR(__LINE__)); // 0x1.3p+4 + 0x1.000002p+0 = 0x1.4p+4
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x41980000), 0x41a00000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.3p+4 = 0x1.4p+4
+ comp32(single_of_bits(0x42000000) + single_of_bits(0x3f800001), 0x42040000, STR(__LINE__)); // 0x1p+5 + 0x1.000002p+0 = 0x1.08p+5
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x42000000), 0x42040000, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+5 = 0x1.08p+5
+ comp32(single_of_bits(0x42820000) + single_of_bits(0x3f800001), 0x42840000, STR(__LINE__)); // 0x1.04p+6 + 0x1.000002p+0 = 0x1.08p+6
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x42820000), 0x42840000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.04p+6 = 0x1.08p+6
+ comp32(single_of_bits(0x43050000) + single_of_bits(0x3f800001), 0x43060000, STR(__LINE__)); // 0x1.0ap+7 + 0x1.000002p+0 = 0x1.0cp+7
+}
+
+void f362(void) {
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x43050000), 0x43060000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0ap+7 = 0x1.0cp+7
+ comp32(single_of_bits(0x43820000) + single_of_bits(0x3f800001), 0x43828000, STR(__LINE__)); // 0x1.04p+8 + 0x1.000002p+0 = 0x1.05p+8
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x43820000), 0x43828000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.04p+8 = 0x1.05p+8
+ comp32(single_of_bits(0x44054000) + single_of_bits(0x3f800001), 0x44058000, STR(__LINE__)); // 0x1.0a8p+9 + 0x1.000002p+0 = 0x1.0bp+9
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x44054000), 0x44058000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0a8p+9 = 0x1.0bp+9
+ comp32(single_of_bits(0x4480a000) + single_of_bits(0x3f800001), 0x4480c000, STR(__LINE__)); // 0x1.014p+10 + 0x1.000002p+0 = 0x1.018p+10
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x4480a000), 0x4480c000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.014p+10 = 0x1.018p+10
+ comp32(single_of_bits(0x45000000) + single_of_bits(0x3f800001), 0x45001000, STR(__LINE__)); // 0x1p+11 + 0x1.000002p+0 = 0x1.002p+11
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x45000000), 0x45001000, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+11 = 0x1.002p+11
+ comp32(single_of_bits(0x45800800) + single_of_bits(0x3f800001), 0x45801000, STR(__LINE__)); // 0x1.001p+12 + 0x1.000002p+0 = 0x1.002p+12
+}
+
+void f363(void) {
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x45800800), 0x45801000, STR(__LINE__)); // 0x1.000002p+0 + 0x1.001p+12 = 0x1.002p+12
+ comp32(single_of_bits(0x46000400) + single_of_bits(0x3f800001), 0x46000800, STR(__LINE__)); // 0x1.0008p+13 + 0x1.000002p+0 = 0x1.001p+13
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x46000400), 0x46000800, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0008p+13 = 0x1.001p+13
+ comp32(single_of_bits(0x46800000) + single_of_bits(0x3f800001), 0x46800200, STR(__LINE__)); // 0x1p+14 + 0x1.000002p+0 = 0x1.0004p+14
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x46800000), 0x46800200, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+14 = 0x1.0004p+14
+ comp32(single_of_bits(0x47000c00) + single_of_bits(0x3f800001), 0x47000d00, STR(__LINE__)); // 0x1.0018p+15 + 0x1.000002p+0 = 0x1.001ap+15
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x47000c00), 0x47000d00, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0018p+15 = 0x1.001ap+15
+ comp32(single_of_bits(0x47800980) + single_of_bits(0x3f800001), 0x47800a00, STR(__LINE__)); // 0x1.0013p+16 + 0x1.000002p+0 = 0x1.0014p+16
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x47800980), 0x47800a00, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0013p+16 = 0x1.0014p+16
+ comp32(single_of_bits(0x48000000) + single_of_bits(0x3f800001), 0x48000040, STR(__LINE__)); // 0x1p+17 + 0x1.000002p+0 = 0x1.00008p+17
+}
+
+void f364(void) {
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x48000000), 0x48000040, STR(__LINE__)); // 0x1.000002p+0 + 0x1p+17 = 0x1.00008p+17
+ comp32(single_of_bits(0x48806b00) + single_of_bits(0x3f800001), 0x48806b20, STR(__LINE__)); // 0x1.00d6p+18 + 0x1.000002p+0 = 0x1.00d64p+18
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x48806b00), 0x48806b20, STR(__LINE__)); // 0x1.000002p+0 + 0x1.00d6p+18 = 0x1.00d64p+18
+ comp32(single_of_bits(0x49002c80) + single_of_bits(0x3f800001), 0x49002c90, STR(__LINE__)); // 0x1.0059p+19 + 0x1.000002p+0 = 0x1.00592p+19
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x49002c80), 0x49002c90, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0059p+19 = 0x1.00592p+19
+ comp32(single_of_bits(0x49802c80) + single_of_bits(0x3f800001), 0x49802c88, STR(__LINE__)); // 0x1.0059p+20 + 0x1.000002p+0 = 0x1.00591p+20
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x49802c80), 0x49802c88, STR(__LINE__)); // 0x1.000002p+0 + 0x1.0059p+20 = 0x1.00591p+20
+ comp32(single_of_bits(0x49ff74e0) + single_of_bits(0x3f800001), 0x49ff74e8, STR(__LINE__)); // 0x1.fee9cp+20 + 0x1.000002p+0 = 0x1.fee9dp+20
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x49ff74e0), 0x49ff74e8, STR(__LINE__)); // 0x1.000002p+0 + 0x1.fee9cp+20 = 0x1.fee9dp+20
+ comp32(single_of_bits(0x4a800002) + single_of_bits(0x3f800001), 0x4a800004, STR(__LINE__)); // 0x1.000004p+22 + 0x1.000002p+0 = 0x1.000008p+22
+}
+
+void f365(void) {
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x4a800002), 0x4a800004, STR(__LINE__)); // 0x1.000002p+0 + 0x1.000004p+22 = 0x1.000008p+22
+ comp32(single_of_bits(0x4b000001) + single_of_bits(0x3f800001), 0x4b000002, STR(__LINE__)); // 0x1.000002p+23 + 0x1.000002p+0 = 0x1.000004p+23
+ comp32(single_of_bits(0x3f800001) + single_of_bits(0x4b000001), 0x4b000002, STR(__LINE__)); // 0x1.000002p+0 + 0x1.000002p+23 = 0x1.000004p+23
+ comp32(single_of_bits(0x4b800000) + single_of_bits(0x40800001), 0x4b800002, STR(__LINE__)); // 0x1p+24 + 0x1.000002p+2 = 0x1.000004p+24
+ comp32(single_of_bits(0x40800001) + single_of_bits(0x4b800000), 0x4b800002, STR(__LINE__)); // 0x1.000002p+2 + 0x1p+24 = 0x1.000004p+24
+ comp32(single_of_bits(0x4b800000) + single_of_bits(0x40000001), 0x4b800001, STR(__LINE__)); // 0x1p+24 + 0x1.000002p+1 = 0x1.000002p+24
+ comp32(single_of_bits(0x40000001) + single_of_bits(0x4b800000), 0x4b800001, STR(__LINE__)); // 0x1.000002p+1 + 0x1p+24 = 0x1.000002p+24
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xbf800001), 0xc0400000, STR(__LINE__)); // -0x1p+1 + -0x1.000002p+0 = -0x1.8p+1
+ comp32(single_of_bits(0xbf800001) + single_of_bits(0xc0000000), 0xc0400000, STR(__LINE__)); // -0x1.000002p+0 + -0x1p+1 = -0x1.8p+1
+ comp32(single_of_bits(0xcb800000) + single_of_bits(0xc0800001), 0xcb800002, STR(__LINE__)); // -0x1p+24 + -0x1.000002p+2 = -0x1.000004p+24
+}
+
+void f366(void) {
+ comp32(single_of_bits(0xc0800001) + single_of_bits(0xcb800000), 0xcb800002, STR(__LINE__)); // -0x1.000002p+2 + -0x1p+24 = -0x1.000004p+24
+ comp32(single_of_bits(0xcb800000) + single_of_bits(0xc0000001), 0xcb800001, STR(__LINE__)); // -0x1p+24 + -0x1.000002p+1 = -0x1.000002p+24
+ comp32(single_of_bits(0xc0000001) + single_of_bits(0xcb800000), 0xcb800001, STR(__LINE__)); // -0x1.000002p+1 + -0x1p+24 = -0x1.000002p+24
+ comp32(single_of_bits(0x46fffe00) + single_of_bits(0x3f800000), 0x47000000, STR(__LINE__)); // 0x1.fffcp+14 + 0x1p+0 = 0x1p+15
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x46fffe00), 0x47000000, STR(__LINE__)); // 0x1p+0 + 0x1.fffcp+14 = 0x1p+15
+ comp32(single_of_bits(0xc6fffe00) + single_of_bits(0xbf800000), 0xc7000000, STR(__LINE__)); // -0x1.fffcp+14 + -0x1p+0 = -0x1p+15
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xc6fffe00), 0xc7000000, STR(__LINE__)); // -0x1p+0 + -0x1.fffcp+14 = -0x1p+15
+ comp32(single_of_bits(0x4b7fffff) + single_of_bits(0x3f800000), 0x4b800000, STR(__LINE__)); // 0x1.fffffep+23 + 0x1p+0 = 0x1p+24
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x4b7fffff), 0x4b800000, STR(__LINE__)); // 0x1p+0 + 0x1.fffffep+23 = 0x1p+24
+ comp32(single_of_bits(0xcb7fffff) + single_of_bits(0xbf800000), 0xcb800000, STR(__LINE__)); // -0x1.fffffep+23 + -0x1p+0 = -0x1p+24
+}
+
+void f367(void) {
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xcb7fffff), 0xcb800000, STR(__LINE__)); // -0x1p+0 + -0x1.fffffep+23 = -0x1p+24
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x41700000), 0x41800000, STR(__LINE__)); // 0x1p+0 + 0x1.ep+3 = 0x1p+4
+ comp32(single_of_bits(0x41700000) + single_of_bits(0x3f800000), 0x41800000, STR(__LINE__)); // 0x1.ep+3 + 0x1p+0 = 0x1p+4
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xc1700000), 0xc1800000, STR(__LINE__)); // -0x1p+0 + -0x1.ep+3 = -0x1p+4
+ comp32(single_of_bits(0xc1700000) + single_of_bits(0xbf800000), 0xc1800000, STR(__LINE__)); // -0x1.ep+3 + -0x1p+0 = -0x1p+4
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x4b7ffffb), 0x4b7ffffd, STR(__LINE__)); // 0x1p+1 + 0x1.fffff6p+23 = 0x1.fffffap+23
+ comp32(single_of_bits(0x4b7ffffb) + single_of_bits(0x40000000), 0x4b7ffffd, STR(__LINE__)); // 0x1.fffff6p+23 + 0x1p+1 = 0x1.fffffap+23
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xcb7ffffb), 0xcb7ffffd, STR(__LINE__)); // -0x1p+1 + -0x1.fffff6p+23 = -0x1.fffffap+23
+ comp32(single_of_bits(0xcb7ffffb) + single_of_bits(0xc0000000), 0xcb7ffffd, STR(__LINE__)); // -0x1.fffff6p+23 + -0x1p+1 = -0x1.fffffap+23
+ comp32(single_of_bits(0x007fffff) + single_of_bits(0x00000001), 0x00800000, STR(__LINE__)); // 0x1.fffffcp-127 + 0x1p-149 = 0x1p-126
+}
+
+void f368(void) {
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x007fffff), 0x00800000, STR(__LINE__)); // 0x1p-149 + 0x1.fffffcp-127 = 0x1p-126
+ comp32(single_of_bits(0x807fffff) + single_of_bits(0x80000001), 0x80800000, STR(__LINE__)); // -0x1.fffffcp-127 + -0x1p-149 = -0x1p-126
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x807fffff), 0x80800000, STR(__LINE__)); // -0x1p-149 + -0x1.fffffcp-127 = -0x1p-126
+ comp32(single_of_bits(0x00700000) + single_of_bits(0x00100000), 0x00800000, STR(__LINE__)); // 0x1.cp-127 + 0x1p-129 = 0x1p-126
+ comp32(single_of_bits(0x00100000) + single_of_bits(0x00700000), 0x00800000, STR(__LINE__)); // 0x1p-129 + 0x1.cp-127 = 0x1p-126
+ comp32(single_of_bits(0x80700000) + single_of_bits(0x80100000), 0x80800000, STR(__LINE__)); // -0x1.cp-127 + -0x1p-129 = -0x1p-126
+ comp32(single_of_bits(0x80100000) + single_of_bits(0x80700000), 0x80800000, STR(__LINE__)); // -0x1p-129 + -0x1.cp-127 = -0x1p-126
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x46fffa00), 0x46fffe00, STR(__LINE__)); // 0x1p+1 + 0x1.fff4p+14 = 0x1.fffcp+14
+ comp32(single_of_bits(0x46fffa00) + single_of_bits(0x40000000), 0x46fffe00, STR(__LINE__)); // 0x1.fff4p+14 + 0x1p+1 = 0x1.fffcp+14
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xc6fffa00), 0xc6fffe00, STR(__LINE__)); // -0x1p+1 + -0x1.fff4p+14 = -0x1.fffcp+14
+}
+
+void f369(void) {
+ comp32(single_of_bits(0xc6fffa00) + single_of_bits(0xc0000000), 0xc6fffe00, STR(__LINE__)); // -0x1.fff4p+14 + -0x1p+1 = -0x1.fffcp+14
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x4b7ffffe), 0x4b7fffff, STR(__LINE__)); // 0x1p+0 + 0x1.fffffcp+23 = 0x1.fffffep+23
+ comp32(single_of_bits(0x4b7ffffe) + single_of_bits(0x3f800000), 0x4b7fffff, STR(__LINE__)); // 0x1.fffffcp+23 + 0x1p+0 = 0x1.fffffep+23
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xcb7ffffe), 0xcb7fffff, STR(__LINE__)); // -0x1p+0 + -0x1.fffffcp+23 = -0x1.fffffep+23
+ comp32(single_of_bits(0xcb7ffffe) + single_of_bits(0xbf800000), 0xcb7fffff, STR(__LINE__)); // -0x1.fffffcp+23 + -0x1p+0 = -0x1.fffffep+23
+ comp32(single_of_bits(0x40400000) + single_of_bits(0x4b7ffffa), 0x4b7ffffd, STR(__LINE__)); // 0x1.8p+1 + 0x1.fffff4p+23 = 0x1.fffffap+23
+ comp32(single_of_bits(0x4b7ffffa) + single_of_bits(0x40400000), 0x4b7ffffd, STR(__LINE__)); // 0x1.fffff4p+23 + 0x1.8p+1 = 0x1.fffffap+23
+ comp32(single_of_bits(0xc0400000) + single_of_bits(0xcb7ffffa), 0xcb7ffffd, STR(__LINE__)); // -0x1.8p+1 + -0x1.fffff4p+23 = -0x1.fffffap+23
+ comp32(single_of_bits(0xcb7ffffa) + single_of_bits(0xc0400000), 0xcb7ffffd, STR(__LINE__)); // -0x1.fffff4p+23 + -0x1.8p+1 = -0x1.fffffap+23
+ comp32(single_of_bits(0x3f800000) + single_of_bits(0x41600000), 0x41700000, STR(__LINE__)); // 0x1p+0 + 0x1.cp+3 = 0x1.ep+3
+}
+
+void f370(void) {
+ comp32(single_of_bits(0x41600000) + single_of_bits(0x3f800000), 0x41700000, STR(__LINE__)); // 0x1.cp+3 + 0x1p+0 = 0x1.ep+3
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xc1600000), 0xc1700000, STR(__LINE__)); // -0x1p+0 + -0x1.cp+3 = -0x1.ep+3
+ comp32(single_of_bits(0xc1600000) + single_of_bits(0xbf800000), 0xc1700000, STR(__LINE__)); // -0x1.cp+3 + -0x1p+0 = -0x1.ep+3
+ comp32(single_of_bits(0x40000000) + single_of_bits(0x41500000), 0x41700000, STR(__LINE__)); // 0x1p+1 + 0x1.ap+3 = 0x1.ep+3
+ comp32(single_of_bits(0x41500000) + single_of_bits(0x40000000), 0x41700000, STR(__LINE__)); // 0x1.ap+3 + 0x1p+1 = 0x1.ep+3
+ comp32(single_of_bits(0xc0000000) + single_of_bits(0xc1500000), 0xc1700000, STR(__LINE__)); // -0x1p+1 + -0x1.ap+3 = -0x1.ep+3
+ comp32(single_of_bits(0xc1500000) + single_of_bits(0xc0000000), 0xc1700000, STR(__LINE__)); // -0x1.ap+3 + -0x1p+1 = -0x1.ep+3
+ comp32(single_of_bits(0x007ffffe) + single_of_bits(0x00000001), 0x007fffff, STR(__LINE__)); // 0x1.fffff8p-127 + 0x1p-149 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x00000001) + single_of_bits(0x007ffffe), 0x007fffff, STR(__LINE__)); // 0x1p-149 + 0x1.fffff8p-127 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x807ffffe) + single_of_bits(0x80000001), 0x807fffff, STR(__LINE__)); // -0x1.fffff8p-127 + -0x1p-149 = -0x1.fffffcp-127
+}
+
+void f371(void) {
+ comp32(single_of_bits(0x80000001) + single_of_bits(0x807ffffe), 0x807fffff, STR(__LINE__)); // -0x1p-149 + -0x1.fffff8p-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00600000) + single_of_bits(0x00100000), 0x00700000, STR(__LINE__)); // 0x1.8p-127 + 0x1p-129 = 0x1.cp-127
+ comp32(single_of_bits(0x00100000) + single_of_bits(0x00600000), 0x00700000, STR(__LINE__)); // 0x1p-129 + 0x1.8p-127 = 0x1.cp-127
+ comp32(single_of_bits(0x80600000) + single_of_bits(0x80100000), 0x80700000, STR(__LINE__)); // -0x1.8p-127 + -0x1p-129 = -0x1.cp-127
+ comp32(single_of_bits(0x80100000) + single_of_bits(0x80600000), 0x80700000, STR(__LINE__)); // -0x1p-129 + -0x1.8p-127 = -0x1.cp-127
+ comp32(single_of_bits(0xfeffffff) + single_of_bits(0xbf800000), 0xfeffffff, STR(__LINE__)); // -0x1.fffffep+126 + -0x1p+0 = -0x1.fffffep+126
+ comp32(single_of_bits(0xbf800000) + single_of_bits(0xfeffffff), 0xfeffffff, STR(__LINE__)); // -0x1p+0 + -0x1.fffffep+126 = -0x1.fffffep+126
+ comp32(single_of_bits(0x47000000) / single_of_bits(0x42000000), 0x44800000, STR(__LINE__)); // 0x1p+15 / 0x1p+5 = 0x1p+10
+ comp32(single_of_bits(0xc7000000) / single_of_bits(0xc2000000), 0x44800000, STR(__LINE__)); // -0x1p+15 / -0x1p+5 = 0x1p+10
+ comp32(single_of_bits(0x47000000) / single_of_bits(0xc2000000), 0xc4800000, STR(__LINE__)); // 0x1p+15 / -0x1p+5 = -0x1p+10
+}
+
+void f372(void) {
+ comp32(single_of_bits(0xc7000000) / single_of_bits(0x42000000), 0xc4800000, STR(__LINE__)); // -0x1p+15 / 0x1p+5 = -0x1p+10
+ comp32(single_of_bits(0x7b800000) / single_of_bits(0x49800000), 0x71800000, STR(__LINE__)); // 0x1p+120 / 0x1p+20 = 0x1p+100
+ comp32(single_of_bits(0xfb800000) / single_of_bits(0xc9800000), 0x71800000, STR(__LINE__)); // -0x1p+120 / -0x1p+20 = 0x1p+100
+ comp32(single_of_bits(0xfb800000) / single_of_bits(0x49800000), 0xf1800000, STR(__LINE__)); // -0x1p+120 / 0x1p+20 = -0x1p+100
+ comp32(single_of_bits(0x7b800000) / single_of_bits(0xc9800000), 0xf1800000, STR(__LINE__)); // 0x1p+120 / -0x1p+20 = -0x1p+100
+ comp32(single_of_bits(0x5f000000) / single_of_bits(0x4b000000), 0x53800000, STR(__LINE__)); // 0x1p+63 / 0x1p+23 = 0x1p+40
+ comp32(single_of_bits(0xdf000000) / single_of_bits(0xcb000000), 0x53800000, STR(__LINE__)); // -0x1p+63 / -0x1p+23 = 0x1p+40
+ comp32(single_of_bits(0xdf000000) / single_of_bits(0x4b000000), 0xd3800000, STR(__LINE__)); // -0x1p+63 / 0x1p+23 = -0x1p+40
+ comp32(single_of_bits(0x5f000000) / single_of_bits(0xcb000000), 0xd3800000, STR(__LINE__)); // 0x1p+63 / -0x1p+23 = -0x1p+40
+ comp32(single_of_bits(0x57000000) / single_of_bits(0x46000000), 0x50800000, STR(__LINE__)); // 0x1p+47 / 0x1p+13 = 0x1p+34
+}
+
+void f373(void) {
+ comp32(single_of_bits(0xd7000000) / single_of_bits(0xc6000000), 0x50800000, STR(__LINE__)); // -0x1p+47 / -0x1p+13 = 0x1p+34
+ comp32(single_of_bits(0xd7000000) / single_of_bits(0x46000000), 0xd0800000, STR(__LINE__)); // -0x1p+47 / 0x1p+13 = -0x1p+34
+ comp32(single_of_bits(0x57000000) / single_of_bits(0xc6000000), 0xd0800000, STR(__LINE__)); // 0x1p+47 / -0x1p+13 = -0x1p+34
+ comp32(single_of_bits(0x42200000) / single_of_bits(0x41200000), 0x40800000, STR(__LINE__)); // 0x1.4p+5 / 0x1.4p+3 = 0x1p+2
+ comp32(single_of_bits(0xc2200000) / single_of_bits(0xc1200000), 0x40800000, STR(__LINE__)); // -0x1.4p+5 / -0x1.4p+3 = 0x1p+2
+ comp32(single_of_bits(0xc2200000) / single_of_bits(0x41200000), 0xc0800000, STR(__LINE__)); // -0x1.4p+5 / 0x1.4p+3 = -0x1p+2
+ comp32(single_of_bits(0x42200000) / single_of_bits(0xc1200000), 0xc0800000, STR(__LINE__)); // 0x1.4p+5 / -0x1.4p+3 = -0x1p+2
+ comp32(single_of_bits(0x46fff000) / single_of_bits(0x41200000), 0x454cc000, STR(__LINE__)); // 0x1.ffep+14 / 0x1.4p+3 = 0x1.998p+11
+ comp32(single_of_bits(0x461c4000) / single_of_bits(0x41200000), 0x447a0000, STR(__LINE__)); // 0x1.388p+13 / 0x1.4p+3 = 0x1.f4p+9
+ comp32(single_of_bits(0x461c4000) / single_of_bits(0x42c80000), 0x42c80000, STR(__LINE__)); // 0x1.388p+13 / 0x1.9p+6 = 0x1.9p+6
+}
+
+void f374(void) {
+ comp32(single_of_bits(0x461c4000) / single_of_bits(0x447a0000), 0x41200000, STR(__LINE__)); // 0x1.388p+13 / 0x1.f4p+9 = 0x1.4p+3
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f800000), 0x3f800000, STR(__LINE__)); // 0x1p+0 / 0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0xbf800000), 0x3f800000, STR(__LINE__)); // -0x1p+0 / -0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0xbf800000), 0xbf800000, STR(__LINE__)); // 0x1p+0 / -0x1p+0 = -0x1p+0
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0x3f800000), 0xbf800000, STR(__LINE__)); // -0x1p+0 / 0x1p+0 = -0x1p+0
+ comp32(single_of_bits(0x40000000) / single_of_bits(0x3f800000), 0x40000000, STR(__LINE__)); // 0x1p+1 / 0x1p+0 = 0x1p+1
+ comp32(single_of_bits(0xc0000000) / single_of_bits(0xbf800000), 0x40000000, STR(__LINE__)); // -0x1p+1 / -0x1p+0 = 0x1p+1
+ comp32(single_of_bits(0xc0000000) / single_of_bits(0x3f800000), 0xc0000000, STR(__LINE__)); // -0x1p+1 / 0x1p+0 = -0x1p+1
+ comp32(single_of_bits(0x40000000) / single_of_bits(0xbf800000), 0xc0000000, STR(__LINE__)); // 0x1p+1 / -0x1p+0 = -0x1p+1
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x3f800000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1p+0 = 0x1.fffffcp-127
+}
+
+void f375(void) {
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xbf800000), 0x807fffff, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x3f800000), 0x00000001, STR(__LINE__)); // 0x1p-149 / 0x1p+0 = 0x1p-149
+ comp32(single_of_bits(0x80000001) / single_of_bits(0xbf800000), 0x00000001, STR(__LINE__)); // -0x1p-149 / -0x1p+0 = 0x1p-149
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x3f800000), 0x80000001, STR(__LINE__)); // -0x1p-149 / 0x1p+0 = -0x1p-149
+ comp32(single_of_bits(0x00000001) / single_of_bits(0xbf800000), 0x80000001, STR(__LINE__)); // 0x1p-149 / -0x1p+0 = -0x1p-149
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0x40000000), 0x7e800000, STR(__LINE__)); // 0x1p+127 / 0x1p+1 = 0x1p+126
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0xc0000000), 0xfe800000, STR(__LINE__)); // 0x1p+127 / -0x1p+1 = -0x1p+126
+ comp32(single_of_bits(0xfeffffff) / single_of_bits(0x40000000), 0xfe7fffff, STR(__LINE__)); // -0x1.fffffep+126 / 0x1p+1 = -0x1.fffffep+125
+ comp32(single_of_bits(0x7efffffd) / single_of_bits(0xc0000000), 0xfe7ffffd, STR(__LINE__)); // 0x1.fffffap+126 / -0x1p+1 = -0x1.fffffap+125
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0xc0000000), 0xfeffffff, STR(__LINE__)); // 0x1.fffffep+127 / -0x1p+1 = -0x1.fffffep+126
+}
+
+void f376(void) {
+ comp32(single_of_bits(0x41000000) / single_of_bits(0x40000000), 0x40800000, STR(__LINE__)); // 0x1p+3 / 0x1p+1 = 0x1p+2
+ comp32(single_of_bits(0xc1000000) / single_of_bits(0xc0000000), 0x40800000, STR(__LINE__)); // -0x1p+3 / -0x1p+1 = 0x1p+2
+ comp32(single_of_bits(0xc1000000) / single_of_bits(0x40000000), 0xc0800000, STR(__LINE__)); // -0x1p+3 / 0x1p+1 = -0x1p+2
+ comp32(single_of_bits(0x41000000) / single_of_bits(0xc0000000), 0xc0800000, STR(__LINE__)); // 0x1p+3 / -0x1p+1 = -0x1p+2
+ comp32(single_of_bits(0x01000000) / single_of_bits(0xc0000000), 0x80800000, STR(__LINE__)); // 0x1p-125 / -0x1p+1 = -0x1p-126
+ comp32(single_of_bits(0x01000003) / single_of_bits(0xc0000000), 0x80800003, STR(__LINE__)); // 0x1.000006p-125 / -0x1p+1 = -0x1.000006p-126
+ comp32(single_of_bits(0x01000001) / single_of_bits(0xc0000000), 0x80800001, STR(__LINE__)); // 0x1.000002p-125 / -0x1p+1 = -0x1.000002p-126
+ comp32(single_of_bits(0x00fffffe) / single_of_bits(0x40000000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-126 / 0x1p+1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x40000000), 0x00200000, STR(__LINE__)); // 0x1p-127 / 0x1p+1 = 0x1p-128
+ comp32(single_of_bits(0x80400000) / single_of_bits(0xc0000000), 0x00200000, STR(__LINE__)); // -0x1p-127 / -0x1p+1 = 0x1p-128
+}
+
+void f377(void) {
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x40000000), 0x80200000, STR(__LINE__)); // -0x1p-127 / 0x1p+1 = -0x1p-128
+ comp32(single_of_bits(0x00400000) / single_of_bits(0xc0000000), 0x80200000, STR(__LINE__)); // 0x1p-127 / -0x1p+1 = -0x1p-128
+ comp32(single_of_bits(0x00000002) / single_of_bits(0x40000000), 0x00000001, STR(__LINE__)); // 0x1p-148 / 0x1p+1 = 0x1p-149
+ comp32(single_of_bits(0x80000002) / single_of_bits(0xc0000000), 0x00000001, STR(__LINE__)); // -0x1p-148 / -0x1p+1 = 0x1p-149
+ comp32(single_of_bits(0x80000002) / single_of_bits(0x40000000), 0x80000001, STR(__LINE__)); // -0x1p-148 / 0x1p+1 = -0x1p-149
+ comp32(single_of_bits(0x00000002) / single_of_bits(0xc0000000), 0x80000001, STR(__LINE__)); // 0x1p-148 / -0x1p+1 = -0x1p-149
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0x7effffff), 0x40000000, STR(__LINE__)); // 0x1.fffffep+127 / 0x1.fffffep+126 = 0x1p+1
+ comp32(single_of_bits(0xff000001) / single_of_bits(0x7e800001), 0xc0000000, STR(__LINE__)); // -0x1.000002p+127 / 0x1.000002p+126 = -0x1p+1
+ comp32(single_of_bits(0x7f000003) / single_of_bits(0xfe800003), 0xc0000000, STR(__LINE__)); // 0x1.000006p+127 / -0x1.000006p+126 = -0x1p+1
+ comp32(single_of_bits(0x01000000) / single_of_bits(0x00800000), 0x40000000, STR(__LINE__)); // 0x1p-125 / 0x1p-126 = 0x1p+1
+}
+
+void f378(void) {
+ comp32(single_of_bits(0x81000001) / single_of_bits(0x00800001), 0xc0000000, STR(__LINE__)); // -0x1.000002p-125 / 0x1.000002p-126 = -0x1p+1
+ comp32(single_of_bits(0x01000001) / single_of_bits(0x00800001), 0x40000000, STR(__LINE__)); // 0x1.000002p-125 / 0x1.000002p-126 = 0x1p+1
+ comp32(single_of_bits(0x01000003) / single_of_bits(0x80800003), 0xc0000000, STR(__LINE__)); // 0x1.000006p-125 / -0x1.000006p-126 = -0x1p+1
+ comp32(single_of_bits(0x81000005) / single_of_bits(0x00800005), 0xc0000000, STR(__LINE__)); // -0x1.00000ap-125 / 0x1.00000ap-126 = -0x1p+1
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x00200000), 0x40000000, STR(__LINE__)); // 0x1p-127 / 0x1p-128 = 0x1p+1
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x80200000), 0x40000000, STR(__LINE__)); // -0x1p-127 / -0x1p-128 = 0x1p+1
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x00200000), 0xc0000000, STR(__LINE__)); // -0x1p-127 / 0x1p-128 = -0x1p+1
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x80200000), 0xc0000000, STR(__LINE__)); // 0x1p-127 / -0x1p-128 = -0x1p+1
+ comp32(single_of_bits(0x00000002) / single_of_bits(0x00000001), 0x40000000, STR(__LINE__)); // 0x1p-148 / 0x1p-149 = 0x1p+1
+ comp32(single_of_bits(0x80000002) / single_of_bits(0x80000001), 0x40000000, STR(__LINE__)); // -0x1p-148 / -0x1p-149 = 0x1p+1
+}
+
+void f379(void) {
+ comp32(single_of_bits(0x00000002) / single_of_bits(0x80000001), 0xc0000000, STR(__LINE__)); // 0x1p-148 / -0x1p-149 = -0x1p+1
+ comp32(single_of_bits(0x80000002) / single_of_bits(0x00000001), 0xc0000000, STR(__LINE__)); // -0x1p-148 / 0x1p-149 = -0x1p+1
+ comp32(single_of_bits(0x40400000) / single_of_bits(0x3f000000), 0x40c00000, STR(__LINE__)); // 0x1.8p+1 / 0x1p-1 = 0x1.8p+2
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0xbf000000), 0x40c00000, STR(__LINE__)); // -0x1.8p+1 / -0x1p-1 = 0x1.8p+2
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0x3f000000), 0xc0c00000, STR(__LINE__)); // -0x1.8p+1 / 0x1p-1 = -0x1.8p+2
+ comp32(single_of_bits(0x40400000) / single_of_bits(0xbf000000), 0xc0c00000, STR(__LINE__)); // 0x1.8p+1 / -0x1p-1 = -0x1.8p+2
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x3f000000), 0x00fffffe, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1p-1 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0xbf000000), 0x00fffffe, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1p-1 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x3f000000), 0x80fffffe, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1p-1 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xbf000000), 0x80fffffe, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1p-1 = -0x1.fffffcp-126
+}
+
+void f380(void) {
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x3f000000), 0x00000002, STR(__LINE__)); // 0x1p-149 / 0x1p-1 = 0x1p-148
+ comp32(single_of_bits(0x80000001) / single_of_bits(0xbf000000), 0x00000002, STR(__LINE__)); // -0x1p-149 / -0x1p-1 = 0x1p-148
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x3f000000), 0x80000002, STR(__LINE__)); // -0x1p-149 / 0x1p-1 = -0x1p-148
+ comp32(single_of_bits(0x00000001) / single_of_bits(0xbf000000), 0x80000002, STR(__LINE__)); // 0x1p-149 / -0x1p-1 = -0x1p-148
+ comp32(single_of_bits(0x40400000) / single_of_bits(0x40c00000), 0x3f000000, STR(__LINE__)); // 0x1.8p+1 / 0x1.8p+2 = 0x1p-1
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0xc0c00000), 0x3f000000, STR(__LINE__)); // -0x1.8p+1 / -0x1.8p+2 = 0x1p-1
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0x40c00000), 0xbf000000, STR(__LINE__)); // -0x1.8p+1 / 0x1.8p+2 = -0x1p-1
+ comp32(single_of_bits(0x40400000) / single_of_bits(0xc0c00000), 0xbf000000, STR(__LINE__)); // 0x1.8p+1 / -0x1.8p+2 = -0x1p-1
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x00fffffe), 0x3f000000, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1.fffffcp-126 = 0x1p-1
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x80fffffe), 0x3f000000, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1.fffffcp-126 = 0x1p-1
+}
+
+void f381(void) {
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x00fffffe), 0xbf000000, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1.fffffcp-126 = -0x1p-1
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x80fffffe), 0xbf000000, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1.fffffcp-126 = -0x1p-1
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x00000002), 0x3f000000, STR(__LINE__)); // 0x1p-149 / 0x1p-148 = 0x1p-1
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x00000002), 0xbf000000, STR(__LINE__)); // -0x1p-149 / 0x1p-148 = -0x1p-1
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x80000002), 0x3f000000, STR(__LINE__)); // -0x1p-149 / -0x1p-148 = 0x1p-1
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x80000002), 0xbf000000, STR(__LINE__)); // 0x1p-149 / -0x1p-148 = -0x1p-1
+ comp32(single_of_bits(0x40400000) / single_of_bits(0x3b000000), 0x44c00000, STR(__LINE__)); // 0x1.8p+1 / 0x1p-9 = 0x1.8p+10
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0xbb000000), 0x44c00000, STR(__LINE__)); // -0x1.8p+1 / -0x1p-9 = 0x1.8p+10
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0x3b000000), 0xc4c00000, STR(__LINE__)); // -0x1.8p+1 / 0x1p-9 = -0x1.8p+10
+ comp32(single_of_bits(0x40400000) / single_of_bits(0xbb000000), 0xc4c00000, STR(__LINE__)); // 0x1.8p+1 / -0x1p-9 = -0x1.8p+10
+}
+
+void f382(void) {
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x3b000000), 0x04fffffe, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1p-9 = 0x1.fffffcp-118
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0xbb000000), 0x04fffffe, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1p-9 = 0x1.fffffcp-118
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x3b000000), 0x84fffffe, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1p-9 = -0x1.fffffcp-118
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xbb000000), 0x84fffffe, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1p-9 = -0x1.fffffcp-118
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x3e000000), 0x00000008, STR(__LINE__)); // 0x1p-149 / 0x1p-3 = 0x1p-146
+ comp32(single_of_bits(0x80000001) / single_of_bits(0xbe000000), 0x00000008, STR(__LINE__)); // -0x1p-149 / -0x1p-3 = 0x1p-146
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x3e000000), 0x80000008, STR(__LINE__)); // -0x1p-149 / 0x1p-3 = -0x1p-146
+ comp32(single_of_bits(0x00000001) / single_of_bits(0xbe000000), 0x80000008, STR(__LINE__)); // 0x1p-149 / -0x1p-3 = -0x1p-146
+ comp32(single_of_bits(0x40400000) / single_of_bits(0x44c00000), 0x3b000000, STR(__LINE__)); // 0x1.8p+1 / 0x1.8p+10 = 0x1p-9
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0x44c00000), 0xbb000000, STR(__LINE__)); // -0x1.8p+1 / 0x1.8p+10 = -0x1p-9
+}
+
+void f383(void) {
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0xc4c00000), 0x3b000000, STR(__LINE__)); // -0x1.8p+1 / -0x1.8p+10 = 0x1p-9
+ comp32(single_of_bits(0x40400000) / single_of_bits(0xc4c00000), 0xbb000000, STR(__LINE__)); // 0x1.8p+1 / -0x1.8p+10 = -0x1p-9
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x04fffffe), 0x3b000000, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1.fffffcp-118 = 0x1p-9
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x04fffffe), 0xbb000000, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1.fffffcp-118 = -0x1p-9
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x84fffffe), 0x3b000000, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1.fffffcp-118 = 0x1p-9
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x84fffffe), 0xbb000000, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1.fffffcp-118 = -0x1p-9
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x00000008), 0x3e000000, STR(__LINE__)); // 0x1p-149 / 0x1p-146 = 0x1p-3
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x00000008), 0xbe000000, STR(__LINE__)); // -0x1p-149 / 0x1p-146 = -0x1p-3
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x80000008), 0x3e000000, STR(__LINE__)); // -0x1p-149 / -0x1p-146 = 0x1p-3
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x80000008), 0xbe000000, STR(__LINE__)); // 0x1p-149 / -0x1p-146 = -0x1p-3
+}
+
+void f384(void) {
+ comp32(single_of_bits(0x41100000) / single_of_bits(0x40400000), 0x40400000, STR(__LINE__)); // 0x1.2p+3 / 0x1.8p+1 = 0x1.8p+1
+ comp32(single_of_bits(0xc1100000) / single_of_bits(0xc0400000), 0x40400000, STR(__LINE__)); // -0x1.2p+3 / -0x1.8p+1 = 0x1.8p+1
+ comp32(single_of_bits(0xc1100000) / single_of_bits(0x40400000), 0xc0400000, STR(__LINE__)); // -0x1.2p+3 / 0x1.8p+1 = -0x1.8p+1
+ comp32(single_of_bits(0x41100000) / single_of_bits(0xc0400000), 0xc0400000, STR(__LINE__)); // 0x1.2p+3 / -0x1.8p+1 = -0x1.8p+1
+ comp32(single_of_bits(0x40c00000) / single_of_bits(0x40400000), 0x40000000, STR(__LINE__)); // 0x1.8p+2 / 0x1.8p+1 = 0x1p+1
+ comp32(single_of_bits(0xc0c00000) / single_of_bits(0xc0400000), 0x40000000, STR(__LINE__)); // -0x1.8p+2 / -0x1.8p+1 = 0x1p+1
+ comp32(single_of_bits(0x40c00000) / single_of_bits(0xc0400000), 0xc0000000, STR(__LINE__)); // 0x1.8p+2 / -0x1.8p+1 = -0x1p+1
+ comp32(single_of_bits(0xc0c00000) / single_of_bits(0x40400000), 0xc0000000, STR(__LINE__)); // -0x1.8p+2 / 0x1.8p+1 = -0x1p+1
+ comp32(single_of_bits(0x7f7ffffd) / single_of_bits(0x40800000), 0x7e7ffffd, STR(__LINE__)); // 0x1.fffffap+127 / 0x1p+2 = 0x1.fffffap+125
+ comp32(single_of_bits(0x7f7ffffd) / single_of_bits(0xc0800000), 0xfe7ffffd, STR(__LINE__)); // 0x1.fffffap+127 / -0x1p+2 = -0x1.fffffap+125
+}
+
+void f385(void) {
+ comp32(single_of_bits(0xff7ffffd) / single_of_bits(0x40800000), 0xfe7ffffd, STR(__LINE__)); // -0x1.fffffap+127 / 0x1p+2 = -0x1.fffffap+125
+ comp32(single_of_bits(0xff7ffffd) / single_of_bits(0xc0800000), 0x7e7ffffd, STR(__LINE__)); // -0x1.fffffap+127 / -0x1p+2 = 0x1.fffffap+125
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x40800000), 0x00100000, STR(__LINE__)); // 0x1p-127 / 0x1p+2 = 0x1p-129
+ comp32(single_of_bits(0x80400000) / single_of_bits(0xc0800000), 0x00100000, STR(__LINE__)); // -0x1p-127 / -0x1p+2 = 0x1p-129
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x40800000), 0x80100000, STR(__LINE__)); // -0x1p-127 / 0x1p+2 = -0x1p-129
+ comp32(single_of_bits(0x00400000) / single_of_bits(0xc0800000), 0x80100000, STR(__LINE__)); // 0x1p-127 / -0x1p+2 = -0x1p-129
+ comp32(single_of_bits(0x00000004) / single_of_bits(0x40800000), 0x00000001, STR(__LINE__)); // 0x1p-147 / 0x1p+2 = 0x1p-149
+ comp32(single_of_bits(0x80000004) / single_of_bits(0xc0800000), 0x00000001, STR(__LINE__)); // -0x1p-147 / -0x1p+2 = 0x1p-149
+ comp32(single_of_bits(0x80000004) / single_of_bits(0x40800000), 0x80000001, STR(__LINE__)); // -0x1p-147 / 0x1p+2 = -0x1p-149
+ comp32(single_of_bits(0x00000004) / single_of_bits(0xc0800000), 0x80000001, STR(__LINE__)); // 0x1p-147 / -0x1p+2 = -0x1p-149
+}
+
+void f386(void) {
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0x7e7fffff), 0x40800000, STR(__LINE__)); // 0x1.fffffep+127 / 0x1.fffffep+125 = 0x1p+2
+ comp32(single_of_bits(0xff7fffff) / single_of_bits(0x7e7fffff), 0xc0800000, STR(__LINE__)); // -0x1.fffffep+127 / 0x1.fffffep+125 = -0x1p+2
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0xfe7fffff), 0xc0800000, STR(__LINE__)); // 0x1.fffffep+127 / -0x1.fffffep+125 = -0x1p+2
+ comp32(single_of_bits(0xff7fffff) / single_of_bits(0xfe7fffff), 0x40800000, STR(__LINE__)); // -0x1.fffffep+127 / -0x1.fffffep+125 = 0x1p+2
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x00100000), 0x40800000, STR(__LINE__)); // 0x1p-127 / 0x1p-129 = 0x1p+2
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x80100000), 0x40800000, STR(__LINE__)); // -0x1p-127 / -0x1p-129 = 0x1p+2
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x00100000), 0xc0800000, STR(__LINE__)); // -0x1p-127 / 0x1p-129 = -0x1p+2
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x80100000), 0xc0800000, STR(__LINE__)); // 0x1p-127 / -0x1p-129 = -0x1p+2
+ comp32(single_of_bits(0x00000004) / single_of_bits(0x00000001), 0x40800000, STR(__LINE__)); // 0x1p-147 / 0x1p-149 = 0x1p+2
+ comp32(single_of_bits(0x80000004) / single_of_bits(0x80000001), 0x40800000, STR(__LINE__)); // -0x1p-147 / -0x1p-149 = 0x1p+2
+}
+
+void f387(void) {
+ comp32(single_of_bits(0x00000004) / single_of_bits(0x80000001), 0xc0800000, STR(__LINE__)); // 0x1p-147 / -0x1p-149 = -0x1p+2
+ comp32(single_of_bits(0x80000004) / single_of_bits(0x00000001), 0xc0800000, STR(__LINE__)); // -0x1p-147 / 0x1p-149 = -0x1p+2
+ comp32(single_of_bits(0x40a00000) / single_of_bits(0x40a00000), 0x3f800000, STR(__LINE__)); // 0x1.4p+2 / 0x1.4p+2 = 0x1p+0
+ comp32(single_of_bits(0xc0a00000) / single_of_bits(0xc0a00000), 0x3f800000, STR(__LINE__)); // -0x1.4p+2 / -0x1.4p+2 = 0x1p+0
+ comp32(single_of_bits(0x40a00000) / single_of_bits(0xc0a00000), 0xbf800000, STR(__LINE__)); // 0x1.4p+2 / -0x1.4p+2 = -0x1p+0
+ comp32(single_of_bits(0xc0a00000) / single_of_bits(0x40a00000), 0xbf800000, STR(__LINE__)); // -0x1.4p+2 / 0x1.4p+2 = -0x1p+0
+ comp32(single_of_bits(0x40400000) / single_of_bits(0x40400000), 0x3f800000, STR(__LINE__)); // 0x1.8p+1 / 0x1.8p+1 = 0x1p+0
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0xc0400000), 0x3f800000, STR(__LINE__)); // -0x1.8p+1 / -0x1.8p+1 = 0x1p+0
+ comp32(single_of_bits(0xc0400000) / single_of_bits(0x40400000), 0xbf800000, STR(__LINE__)); // -0x1.8p+1 / 0x1.8p+1 = -0x1p+0
+ comp32(single_of_bits(0x40400000) / single_of_bits(0xc0400000), 0xbf800000, STR(__LINE__)); // 0x1.8p+1 / -0x1.8p+1 = -0x1p+0
+}
+
+void f388(void) {
+ comp32(single_of_bits(0x40e00000) / single_of_bits(0x40e00000), 0x3f800000, STR(__LINE__)); // 0x1.cp+2 / 0x1.cp+2 = 0x1p+0
+ comp32(single_of_bits(0xc0e00000) / single_of_bits(0xc0e00000), 0x3f800000, STR(__LINE__)); // -0x1.cp+2 / -0x1.cp+2 = 0x1p+0
+ comp32(single_of_bits(0x40e00000) / single_of_bits(0xc0e00000), 0xbf800000, STR(__LINE__)); // 0x1.cp+2 / -0x1.cp+2 = -0x1p+0
+ comp32(single_of_bits(0xc0e00000) / single_of_bits(0x40e00000), 0xbf800000, STR(__LINE__)); // -0x1.cp+2 / 0x1.cp+2 = -0x1p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x00000001), 0x3f800000, STR(__LINE__)); // 0x1p-149 / 0x1p-149 = 0x1p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x80000001), 0x3f800000, STR(__LINE__)); // -0x1p-149 / -0x1p-149 = 0x1p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x80000001), 0xbf800000, STR(__LINE__)); // 0x1p-149 / -0x1p-149 = -0x1p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x00000001), 0xbf800000, STR(__LINE__)); // -0x1p-149 / 0x1p-149 = -0x1p+0
+ comp32(single_of_bits(0x00000009) / single_of_bits(0x41100000), 0x00000001, STR(__LINE__)); // 0x1.2p-146 / 0x1.2p+3 = 0x1p-149
+ comp32(single_of_bits(0x00000009) / single_of_bits(0xc1100000), 0x80000001, STR(__LINE__)); // 0x1.2p-146 / -0x1.2p+3 = -0x1p-149
+}
+
+void f389(void) {
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x00000000), 0x7fc00000, STR(__LINE__)); // 0x0p+0 / 0x0p+0 = nan
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x00000000), 0xffc00000, STR(__LINE__)); // -0x0p+0 / 0x0p+0 = -nan
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x80000000), 0xffc00000, STR(__LINE__)); // 0x0p+0 / -0x0p+0 = -nan
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x80000000), 0x7fc00000, STR(__LINE__)); // -0x0p+0 / -0x0p+0 = nan
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x00000003), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1.8p-148 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x80000002), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1p-148 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x80000004), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p-147 = 0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x007fffff), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x007fffff), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1.fffffcp-127 = -0x0p+0
+}
+
+void f390(void) {
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x807fffff), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1.fffffcp-127 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x807fffff), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1p-149 / 0x0p+0 = inf
+ comp32(single_of_bits(0x80000003) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1.8p-148 / 0x0p+0 = -inf
+ comp32(single_of_bits(0x00000002) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1p-148 / -0x0p+0 = -inf
+ comp32(single_of_bits(0x80000004) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1p-147 / -0x0p+0 = inf
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp-127 / 0x0p+0 = inf
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1.fffffcp-127 / 0x0p+0 = -inf
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1.fffffcp-127 / -0x0p+0 = -inf
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1.fffffcp-127 / -0x0p+0 = inf
+}
+
+void f391(void) {
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x3f800000), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x40000000), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1p+1 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0xc0400000), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1.8p+1 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0xc0800000), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p+2 = 0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x40a00000), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.4p+2 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x40c00000), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1.8p+2 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0xc0e00000), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1.cp+2 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0xc1000000), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p+3 = 0x0p+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1p+0 / 0x0p+0 = inf
+ comp32(single_of_bits(0xc0000000) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1p+1 / 0x0p+0 = -inf
+}
+
+void f392(void) {
+ comp32(single_of_bits(0x40400000) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1.8p+1 / -0x0p+0 = -inf
+ comp32(single_of_bits(0xc0800000) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1p+2 / -0x0p+0 = inf
+ comp32(single_of_bits(0x40a00000) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1.4p+2 / 0x0p+0 = inf
+ comp32(single_of_bits(0xc0c00000) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1.8p+2 / 0x0p+0 = -inf
+ comp32(single_of_bits(0x40e00000) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1.cp+2 / -0x0p+0 = -inf
+ comp32(single_of_bits(0xc1000000) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1p+3 / -0x0p+0 = inf
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x7f000000), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p+127 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x7e800000), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1p+126 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0xff000000), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1p+127 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0xfe800000), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p+126 = 0x0p+0
+}
+
+void f393(void) {
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x7effffff), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.fffffep+126 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x7e7fffff), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1.fffffep+125 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0xfe7fffff), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1.fffffep+125 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0xfeffffff), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1.fffffep+126 = 0x0p+0
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1p+127 / 0x0p+0 = inf
+ comp32(single_of_bits(0xfe800000) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1p+126 / 0x0p+0 = -inf
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1p+127 / -0x0p+0 = -inf
+ comp32(single_of_bits(0xfe800000) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1p+126 / -0x0p+0 = inf
+ comp32(single_of_bits(0x7effffff) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+126 / 0x0p+0 = inf
+ comp32(single_of_bits(0xfe7fffff) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1.fffffep+125 / 0x0p+0 = -inf
+}
+
+void f394(void) {
+ comp32(single_of_bits(0x7e7fffff) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1.fffffep+125 / -0x0p+0 = -inf
+ comp32(single_of_bits(0xfeffffff) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1.fffffep+126 / -0x0p+0 = inf
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x01000000), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x81000000), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x00ffffff), 0x00000000, STR(__LINE__)); // 0x0p+0 / 0x1.fffffep-126 = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x00800001), 0x80000000, STR(__LINE__)); // -0x0p+0 / 0x1.000002p-126 = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x80800001), 0x80000000, STR(__LINE__)); // 0x0p+0 / -0x1.000002p-126 = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x80ffffff), 0x00000000, STR(__LINE__)); // -0x0p+0 / -0x1.fffffep-126 = 0x0p+0
+}
+
+void f395(void) {
+ comp32(single_of_bits(0x00800000) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1p-126 / 0x0p+0 = inf
+ comp32(single_of_bits(0x81000000) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1p-125 / 0x0p+0 = -inf
+ comp32(single_of_bits(0x01000000) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1p-125 / -0x0p+0 = -inf
+ comp32(single_of_bits(0x80800000) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1p-126 / -0x0p+0 = inf
+ comp32(single_of_bits(0x00ffffff) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep-126 / 0x0p+0 = inf
+ comp32(single_of_bits(0x80800001) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -0x1.000002p-126 / 0x0p+0 = -inf
+ comp32(single_of_bits(0x00800001) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // 0x1.000002p-126 / -0x0p+0 = -inf
+ comp32(single_of_bits(0x80ffffff) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -0x1.fffffep-126 / -0x0p+0 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x00000000), 0x7f800000, STR(__LINE__)); // inf / 0x0p+0 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x00000000), 0xff800000, STR(__LINE__)); // -inf / 0x0p+0 = -inf
+}
+
+void f396(void) {
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x80000000), 0xff800000, STR(__LINE__)); // inf / -0x0p+0 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x80000000), 0x7f800000, STR(__LINE__)); // -inf / -0x0p+0 = inf
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x0p+0 / inf = 0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x0p+0 / inf = -0x0p+0
+ comp32(single_of_bits(0x00000000) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x0p+0 / -inf = -0x0p+0
+ comp32(single_of_bits(0x80000000) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x0p+0 / -inf = 0x0p+0
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x00000000), 0x7fc00000, STR(__LINE__)); // nan / 0x0p+0 = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x80000000), 0x7fc00000, STR(__LINE__)); // nan / -0x0p+0 = nan
+ comp32(single_of_bits(0x00000000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x0p+0 / nan = nan
+ comp32(single_of_bits(0x80000000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x0p+0 / nan = nan
+}
+
+void f397(void) {
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x7f800000), 0x7fc00000, STR(__LINE__)); // inf / inf = nan
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x7f800000), 0xffc00000, STR(__LINE__)); // -inf / inf = -nan
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0xff800000), 0xffc00000, STR(__LINE__)); // inf / -inf = -nan
+ comp32(single_of_bits(0xff800000) / single_of_bits(0xff800000), 0x7fc00000, STR(__LINE__)); // -inf / -inf = nan
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x00000001), 0x7f800000, STR(__LINE__)); // inf / 0x1p-149 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x00000003), 0xff800000, STR(__LINE__)); // -inf / 0x1.8p-148 = -inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x80000002), 0xff800000, STR(__LINE__)); // inf / -0x1p-148 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x80000004), 0x7f800000, STR(__LINE__)); // -inf / -0x1p-147 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x007fffff), 0x7f800000, STR(__LINE__)); // inf / 0x1.fffffcp-127 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x007fffff), 0xff800000, STR(__LINE__)); // -inf / 0x1.fffffcp-127 = -inf
+}
+
+void f398(void) {
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x807fffff), 0xff800000, STR(__LINE__)); // inf / -0x1.fffffcp-127 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x807fffff), 0x7f800000, STR(__LINE__)); // -inf / -0x1.fffffcp-127 = inf
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1p-149 / inf = 0x0p+0
+ comp32(single_of_bits(0x80000003) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1.8p-148 / inf = -0x0p+0
+ comp32(single_of_bits(0x00000002) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1p-148 / -inf = -0x0p+0
+ comp32(single_of_bits(0x80000004) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1p-147 / -inf = 0x0p+0
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1.fffffcp-127 / inf = 0x0p+0
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1.fffffcp-127 / inf = -0x0p+0
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1.fffffcp-127 / -inf = -0x0p+0
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1.fffffcp-127 / -inf = 0x0p+0
+}
+
+void f399(void) {
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x3f800000), 0x7f800000, STR(__LINE__)); // inf / 0x1p+0 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x40000000), 0xff800000, STR(__LINE__)); // -inf / 0x1p+1 = -inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0xc0400000), 0xff800000, STR(__LINE__)); // inf / -0x1.8p+1 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0xc0800000), 0x7f800000, STR(__LINE__)); // -inf / -0x1p+2 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x40a00000), 0x7f800000, STR(__LINE__)); // inf / 0x1.4p+2 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x40c00000), 0xff800000, STR(__LINE__)); // -inf / 0x1.8p+2 = -inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0xc0e00000), 0xff800000, STR(__LINE__)); // inf / -0x1.cp+2 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0xc1000000), 0x7f800000, STR(__LINE__)); // -inf / -0x1p+3 = inf
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1p+0 / inf = 0x0p+0
+ comp32(single_of_bits(0xc0000000) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1p+1 / inf = -0x0p+0
+}
+
+void f400(void) {
+ comp32(single_of_bits(0x40400000) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1.8p+1 / -inf = -0x0p+0
+ comp32(single_of_bits(0xc0800000) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1p+2 / -inf = 0x0p+0
+ comp32(single_of_bits(0x40a00000) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1.4p+2 / inf = 0x0p+0
+ comp32(single_of_bits(0xc0c00000) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1.8p+2 / inf = -0x0p+0
+ comp32(single_of_bits(0x40e00000) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1.cp+2 / -inf = -0x0p+0
+ comp32(single_of_bits(0xc1000000) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1p+3 / -inf = 0x0p+0
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1p+127 / inf = 0x0p+0
+ comp32(single_of_bits(0xfe800000) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1p+126 / inf = -0x0p+0
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1p+127 / -inf = -0x0p+0
+ comp32(single_of_bits(0xfe800000) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1p+126 / -inf = 0x0p+0
+}
+
+void f401(void) {
+ comp32(single_of_bits(0x7effffff) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1.fffffep+126 / inf = 0x0p+0
+ comp32(single_of_bits(0xfe7fffff) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1.fffffep+125 / inf = -0x0p+0
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1.fffffep+127 / -inf = -0x0p+0
+ comp32(single_of_bits(0xff7fffff) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1.fffffep+127 / -inf = 0x0p+0
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // inf / 0x1p+127 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x7e800000), 0xff800000, STR(__LINE__)); // -inf / 0x1p+126 = -inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // inf / -0x1p+127 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0xfe800000), 0x7f800000, STR(__LINE__)); // -inf / -0x1p+126 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x7effffff), 0x7f800000, STR(__LINE__)); // inf / 0x1.fffffep+126 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0xfe7fffff), 0xff800000, STR(__LINE__)); // inf / -0x1.fffffep+125 = -inf
+}
+
+void f402(void) {
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // inf / -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0xff7fffff), 0x7f800000, STR(__LINE__)); // -inf / -0x1.fffffep+127 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x00800000), 0x7f800000, STR(__LINE__)); // inf / 0x1p-126 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x01000000), 0xff800000, STR(__LINE__)); // -inf / 0x1p-125 = -inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x81000000), 0xff800000, STR(__LINE__)); // inf / -0x1p-125 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x80800000), 0x7f800000, STR(__LINE__)); // -inf / -0x1p-126 = inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x00ffffff), 0x7f800000, STR(__LINE__)); // inf / 0x1.fffffep-126 = inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x00800001), 0xff800000, STR(__LINE__)); // -inf / 0x1.000002p-126 = -inf
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x80800001), 0xff800000, STR(__LINE__)); // inf / -0x1.000002p-126 = -inf
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x80ffffff), 0x7f800000, STR(__LINE__)); // -inf / -0x1.fffffep-126 = inf
+}
+
+void f403(void) {
+ comp32(single_of_bits(0x00800000) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1p-126 / inf = 0x0p+0
+ comp32(single_of_bits(0x81000000) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1p-125 / inf = -0x0p+0
+ comp32(single_of_bits(0x01000000) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1p-125 / -inf = -0x0p+0
+ comp32(single_of_bits(0x80800000) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1p-126 / -inf = 0x0p+0
+ comp32(single_of_bits(0x00ffffff) / single_of_bits(0x7f800000), 0x00000000, STR(__LINE__)); // 0x1.fffffep-126 / inf = 0x0p+0
+ comp32(single_of_bits(0x80800001) / single_of_bits(0x7f800000), 0x80000000, STR(__LINE__)); // -0x1.000002p-126 / inf = -0x0p+0
+ comp32(single_of_bits(0x00800001) / single_of_bits(0xff800000), 0x80000000, STR(__LINE__)); // 0x1.000002p-126 / -inf = -0x0p+0
+ comp32(single_of_bits(0x80ffffff) / single_of_bits(0xff800000), 0x00000000, STR(__LINE__)); // -0x1.fffffep-126 / -inf = 0x0p+0
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x7f800000), 0x7fc00000, STR(__LINE__)); // nan / inf = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0xff800000), 0x7fc00000, STR(__LINE__)); // nan / -inf = nan
+}
+
+void f404(void) {
+ comp32(single_of_bits(0x7f800000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // inf / nan = nan
+ comp32(single_of_bits(0xff800000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -inf / nan = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // nan / nan = nan
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1.fffffcp-127 / nan = nan
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1.fffffcp-127 / nan = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x007fffff), 0x7fc00000, STR(__LINE__)); // nan / 0x1.fffffcp-127 = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x807fffff), 0x7fc00000, STR(__LINE__)); // nan / -0x1.fffffcp-127 = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x00000001), 0x7fc00000, STR(__LINE__)); // nan / 0x1p-149 = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x80000001), 0x7fc00000, STR(__LINE__)); // nan / -0x1p-149 = nan
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1p-149 / nan = nan
+}
+
+void f405(void) {
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1p-149 / nan = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x3f800000), 0x7fc00000, STR(__LINE__)); // nan / 0x1p+0 = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0xbf800000), 0x7fc00000, STR(__LINE__)); // nan / -0x1p+0 = nan
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1p+0 / nan = nan
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1p+0 / nan = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0x7f7fffff), 0x7fc00000, STR(__LINE__)); // nan / 0x1.fffffep+127 = nan
+ comp32(single_of_bits(0x7fc00000) / single_of_bits(0xff7fffff), 0x7fc00000, STR(__LINE__)); // nan / -0x1.fffffep+127 = nan
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1.fffffep+127 / nan = nan
+ comp32(single_of_bits(0xff7fffff) / single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1.fffffep+127 / nan = nan
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0x3f000000), 0x7f800000, STR(__LINE__)); // 0x1p+127 / 0x1p-1 = inf
+}
+
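+// f406: quotients that overflow to infinity or underflow to zero.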
+void f406(void) {
+ comp32(single_of_bits(0xff000000) / single_of_bits(0xbf000000), 0x7f800000, STR(__LINE__)); // -0x1p+127 / -0x1p-1 = inf
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0xbf000000), 0xff800000, STR(__LINE__)); // 0x1p+127 / -0x1p-1 = -inf
+ comp32(single_of_bits(0xff000000) / single_of_bits(0x3f000000), 0xff800000, STR(__LINE__)); // -0x1p+127 / 0x1p-1 = -inf
+ comp32(single_of_bits(0x7b000000) / single_of_bits(0x05000000), 0x7f800000, STR(__LINE__)); // 0x1p+119 / 0x1p-117 = inf
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0x00000001), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 / 0x1p-149 = inf
+ comp32(single_of_bits(0x7f000000) / single_of_bits(0x007fffff), 0x7f800000, STR(__LINE__)); // 0x1p+127 / 0x1.fffffcp-127 = inf
+ comp32(single_of_bits(0x7f7fffff) / single_of_bits(0x3f7fffff), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 / 0x1.fffffep-1 = inf
+ comp32(single_of_bits(0x00800000) / single_of_bits(0x4c000000), 0x00000000, STR(__LINE__)); // 0x1p-126 / 0x1p+25 = 0x0p+0
+ comp32(single_of_bits(0x80800000) / single_of_bits(0xcc000000), 0x00000000, STR(__LINE__)); // -0x1p-126 / -0x1p+25 = 0x0p+0
+ comp32(single_of_bits(0x00800000) / single_of_bits(0xcc000000), 0x80000000, STR(__LINE__)); // 0x1p-126 / -0x1p+25 = -0x0p+0
+}
+
+void f407(void) {
+ comp32(single_of_bits(0x80800000) / single_of_bits(0x4c000000), 0x80000000, STR(__LINE__)); // -0x1p-126 / 0x1p+25 = -0x0p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x40800000), 0x00000000, STR(__LINE__)); // 0x1p-149 / 0x1p+2 = 0x0p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0xc0800000), 0x00000000, STR(__LINE__)); // -0x1p-149 / -0x1p+2 = 0x0p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0xc0800000), 0x80000000, STR(__LINE__)); // 0x1p-149 / -0x1p+2 = -0x0p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x40800000), 0x80000000, STR(__LINE__)); // -0x1p-149 / 0x1p+2 = -0x0p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x7f7fffff), 0x00000000, STR(__LINE__)); // 0x1p-149 / 0x1.fffffep+127 = 0x0p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0xff7fffff), 0x00000000, STR(__LINE__)); // -0x1p-149 / -0x1.fffffep+127 = 0x0p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0xff7fffff), 0x80000000, STR(__LINE__)); // 0x1p-149 / -0x1.fffffep+127 = -0x0p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x7f7fffff), 0x80000000, STR(__LINE__)); // -0x1p-149 / 0x1.fffffep+127 = -0x0p+0
+ comp32(single_of_bits(0x00ffffff) / single_of_bits(0x4b800000), 0x00000001, STR(__LINE__)); // 0x1.fffffep-126 / 0x1p+24 = 0x1p-149
+}
+
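+// f408, f409: quotients in the subnormal range that round to the smallest denormal, 0x1p-149.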
+void f408(void) {
+ comp32(single_of_bits(0x80ffffff) / single_of_bits(0xcb800000), 0x00000001, STR(__LINE__)); // -0x1.fffffep-126 / -0x1p+24 = 0x1p-149
+ comp32(single_of_bits(0x80ffffff) / single_of_bits(0x4b800000), 0x80000001, STR(__LINE__)); // -0x1.fffffep-126 / 0x1p+24 = -0x1p-149
+ comp32(single_of_bits(0x00ffffff) / single_of_bits(0xcb800000), 0x80000001, STR(__LINE__)); // 0x1.fffffep-126 / -0x1p+24 = -0x1p-149
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x4b000000), 0x00000001, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1p+23 = 0x1p-149
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0xcb000000), 0x00000001, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1p+23 = 0x1p-149
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x4b000000), 0x80000001, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1p+23 = -0x1p-149
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xcb000000), 0x80000001, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1p+23 = -0x1p-149
+ comp32(single_of_bits(0x00c00000) / single_of_bits(0x4b800000), 0x00000001, STR(__LINE__)); // 0x1.8p-126 / 0x1p+24 = 0x1p-149
+ comp32(single_of_bits(0x80c00000) / single_of_bits(0xcb800000), 0x00000001, STR(__LINE__)); // -0x1.8p-126 / -0x1p+24 = 0x1p-149
+ comp32(single_of_bits(0x80c00000) / single_of_bits(0x4b800000), 0x80000001, STR(__LINE__)); // -0x1.8p-126 / 0x1p+24 = -0x1p-149
+}
+
+void f409(void) {
+ comp32(single_of_bits(0x00c00000) / single_of_bits(0xcb800000), 0x80000001, STR(__LINE__)); // 0x1.8p-126 / -0x1p+24 = -0x1p-149
+ comp32(single_of_bits(0x00000003) / single_of_bits(0x40800000), 0x00000001, STR(__LINE__)); // 0x1.8p-148 / 0x1p+2 = 0x1p-149
+ comp32(single_of_bits(0x80000003) / single_of_bits(0xc0800000), 0x00000001, STR(__LINE__)); // -0x1.8p-148 / -0x1p+2 = 0x1p-149
+ comp32(single_of_bits(0x80000003) / single_of_bits(0x40800000), 0x80000001, STR(__LINE__)); // -0x1.8p-148 / 0x1p+2 = -0x1p-149
+ comp32(single_of_bits(0x00000003) / single_of_bits(0xc0800000), 0x80000001, STR(__LINE__)); // 0x1.8p-148 / -0x1p+2 = -0x1p-149
+ comp32(single_of_bits(0x00a00000) / single_of_bits(0x4b800000), 0x00000001, STR(__LINE__)); // 0x1.4p-126 / 0x1p+24 = 0x1p-149
+ comp32(single_of_bits(0x80a00000) / single_of_bits(0xcb800000), 0x00000001, STR(__LINE__)); // -0x1.4p-126 / -0x1p+24 = 0x1p-149
+ comp32(single_of_bits(0x80a00000) / single_of_bits(0x4b800000), 0x80000001, STR(__LINE__)); // -0x1.4p-126 / 0x1p+24 = -0x1p-149
+ comp32(single_of_bits(0x00a00000) / single_of_bits(0xcb800000), 0x80000001, STR(__LINE__)); // 0x1.4p-126 / -0x1p+24 = -0x1p-149
+ comp32(single_of_bits(0x00000005) / single_of_bits(0x41000000), 0x00000001, STR(__LINE__)); // 0x1.4p-147 / 0x1p+3 = 0x1p-149
+}
+
+void f410(void) {
+ comp32(single_of_bits(0x80000005) / single_of_bits(0xc1000000), 0x00000001, STR(__LINE__)); // -0x1.4p-147 / -0x1p+3 = 0x1p-149
+ comp32(single_of_bits(0x80000005) / single_of_bits(0x41000000), 0x80000001, STR(__LINE__)); // -0x1.4p-147 / 0x1p+3 = -0x1p-149
+ comp32(single_of_bits(0x00000005) / single_of_bits(0xc1000000), 0x80000001, STR(__LINE__)); // 0x1.4p-147 / -0x1p+3 = -0x1p-149
+ comp32(single_of_bits(0x00800000) / single_of_bits(0x4b800000), 0x00000000, STR(__LINE__)); // 0x1p-126 / 0x1p+24 = 0x0p+0
+ comp32(single_of_bits(0x80800000) / single_of_bits(0xcb800000), 0x00000000, STR(__LINE__)); // -0x1p-126 / -0x1p+24 = 0x0p+0
+ comp32(single_of_bits(0x80800000) / single_of_bits(0x4b800000), 0x80000000, STR(__LINE__)); // -0x1p-126 / 0x1p+24 = -0x0p+0
+ comp32(single_of_bits(0x00800000) / single_of_bits(0xcb800000), 0x80000000, STR(__LINE__)); // 0x1p-126 / -0x1p+24 = -0x0p+0
+ comp32(single_of_bits(0x00000001) / single_of_bits(0x40000000), 0x00000000, STR(__LINE__)); // 0x1p-149 / 0x1p+1 = 0x0p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0xc0000000), 0x00000000, STR(__LINE__)); // -0x1p-149 / -0x1p+1 = 0x0p+0
+ comp32(single_of_bits(0x80000001) / single_of_bits(0x40000000), 0x80000000, STR(__LINE__)); // -0x1p-149 / 0x1p+1 = -0x0p+0
+}
+
+void f411(void) {
+ comp32(single_of_bits(0x00000001) / single_of_bits(0xc0000000), 0x80000000, STR(__LINE__)); // 0x1p-149 / -0x1p+1 = -0x0p+0
+ comp32(single_of_bits(0x01400000) / single_of_bits(0x4b800000), 0x00000002, STR(__LINE__)); // 0x1.8p-125 / 0x1p+24 = 0x1p-148
+ comp32(single_of_bits(0x81400000) / single_of_bits(0xcb800000), 0x00000002, STR(__LINE__)); // -0x1.8p-125 / -0x1p+24 = 0x1p-148
+ comp32(single_of_bits(0x81400000) / single_of_bits(0x4b800000), 0x80000002, STR(__LINE__)); // -0x1.8p-125 / 0x1p+24 = -0x1p-148
+ comp32(single_of_bits(0x01400000) / single_of_bits(0xcb800000), 0x80000002, STR(__LINE__)); // 0x1.8p-125 / -0x1p+24 = -0x1p-148
+ comp32(single_of_bits(0x00000003) / single_of_bits(0x40000000), 0x00000002, STR(__LINE__)); // 0x1.8p-148 / 0x1p+1 = 0x1p-148
+ comp32(single_of_bits(0x80000003) / single_of_bits(0xc0000000), 0x00000002, STR(__LINE__)); // -0x1.8p-148 / -0x1p+1 = 0x1p-148
+ comp32(single_of_bits(0x80000003) / single_of_bits(0x40000000), 0x80000002, STR(__LINE__)); // -0x1.8p-148 / 0x1p+1 = -0x1p-148
+ comp32(single_of_bits(0x00000003) / single_of_bits(0xc0000000), 0x80000002, STR(__LINE__)); // 0x1.8p-148 / -0x1p+1 = -0x1p-148
+ comp32(single_of_bits(0x00fffffd) / single_of_bits(0x40000000), 0x007ffffe, STR(__LINE__)); // 0x1.fffffap-126 / 0x1p+1 = 0x1.fffff8p-127
+}
+
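+// f412, f413: correctly rounded quotients in the subnormal range (gradual underflow).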
+void f412(void) {
+ comp32(single_of_bits(0x80fffffd) / single_of_bits(0xc0000000), 0x007ffffe, STR(__LINE__)); // -0x1.fffffap-126 / -0x1p+1 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x80fffffd) / single_of_bits(0x40000000), 0x807ffffe, STR(__LINE__)); // -0x1.fffffap-126 / 0x1p+1 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x00fffffd) / single_of_bits(0xc0000000), 0x807ffffe, STR(__LINE__)); // 0x1.fffffap-126 / -0x1p+1 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x00f00003) / single_of_bits(0x40c00000), 0x00280000, STR(__LINE__)); // 0x1.e00006p-126 / 0x1.8p+2 = 0x1.4p-128
+ comp32(single_of_bits(0x00f00003) / single_of_bits(0x4bc00000), 0x00000001, STR(__LINE__)); // 0x1.e00006p-126 / 0x1.8p+24 = 0x1p-149
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x40000000), 0x00400000, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1p+1 = 0x1p-127
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0xc0000000), 0x00400000, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1p+1 = 0x1p-127
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x40000000), 0x80400000, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1p+1 = -0x1p-127
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xc0000000), 0x80400000, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1p+1 = -0x1p-127
+ comp32(single_of_bits(0x00800000) / single_of_bits(0x3f800001), 0x007fffff, STR(__LINE__)); // 0x1p-126 / 0x1.000002p+0 = 0x1.fffffcp-127
+}
+
+void f413(void) {
+ comp32(single_of_bits(0x80800000) / single_of_bits(0xbf800001), 0x007fffff, STR(__LINE__)); // -0x1p-126 / -0x1.000002p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80800000) / single_of_bits(0x3f800001), 0x807fffff, STR(__LINE__)); // -0x1p-126 / 0x1.000002p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00800000) / single_of_bits(0xbf800001), 0x807fffff, STR(__LINE__)); // 0x1p-126 / -0x1.000002p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00800001) / single_of_bits(0x3f800002), 0x007fffff, STR(__LINE__)); // 0x1.000002p-126 / 0x1.000004p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x00800002) / single_of_bits(0x3f800006), 0x007ffffc, STR(__LINE__)); // 0x1.000004p-126 / 0x1.00000cp+0 = 0x1.fffffp-127
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x3f800001), 0x007ffffe, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1.000002p+0 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0xbf800001), 0x007ffffe, STR(__LINE__)); // -0x1.fffffcp-127 / -0x1.000002p+0 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x807fffff) / single_of_bits(0x3f800001), 0x807ffffe, STR(__LINE__)); // -0x1.fffffcp-127 / 0x1.000002p+0 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0xbf800001), 0x807ffffe, STR(__LINE__)); // 0x1.fffffcp-127 / -0x1.000002p+0 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x007ffffe) / single_of_bits(0x3f7ffffe), 0x007fffff, STR(__LINE__)); // 0x1.fffff8p-127 / 0x1.fffffcp-1 = 0x1.fffffcp-127
+}
+
+void f414(void) {
+ comp32(single_of_bits(0x007ffff7) / single_of_bits(0x3f7ffffe), 0x007ffff8, STR(__LINE__)); // 0x1.ffffdcp-127 / 0x1.fffffcp-1 = 0x1.ffffep-127
+ comp32(single_of_bits(0x807ffff8) / single_of_bits(0x3f7ffffe), 0x807ffff9, STR(__LINE__)); // -0x1.ffffep-127 / 0x1.fffffcp-1 = -0x1.ffffe4p-127
+ comp32(single_of_bits(0x007fffff) / single_of_bits(0x3f800002), 0x007ffffd, STR(__LINE__)); // 0x1.fffffcp-127 / 0x1.000004p+0 = 0x1.fffff4p-127
+ comp32(single_of_bits(0x00bfffff) / single_of_bits(0x3fc00000), 0x007fffff, STR(__LINE__)); // 0x1.7ffffep-126 / 0x1.8p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80bfffff) / single_of_bits(0x3fc00000), 0x807fffff, STR(__LINE__)); // -0x1.7ffffep-126 / 0x1.8p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x7e800000), 0x00800000, STR(__LINE__)); // 0x1.fffffep-1 / 0x1p+126 = 0x1p-126
+ comp32(single_of_bits(0xbf7fffff) / single_of_bits(0x7e800000), 0x80800000, STR(__LINE__)); // -0x1.fffffep-1 / 0x1p+126 = -0x1p-126
+ comp32(single_of_bits(0x00ffffff) / single_of_bits(0x40000000), 0x00800000, STR(__LINE__)); // 0x1.fffffep-126 / 0x1p+1 = 0x1p-126
+ comp32(single_of_bits(0x80ffffff) / single_of_bits(0xc0000000), 0x00800000, STR(__LINE__)); // -0x1.fffffep-126 / -0x1p+1 = 0x1p-126
+ comp32(single_of_bits(0x80ffffff) / single_of_bits(0x40000000), 0x80800000, STR(__LINE__)); // -0x1.fffffep-126 / 0x1p+1 = -0x1p-126
+}
+
+void f415(void) {
+ comp32(single_of_bits(0x00ffffff) / single_of_bits(0xc0000000), 0x80800000, STR(__LINE__)); // 0x1.fffffep-126 / -0x1p+1 = -0x1p-126
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f800001), 0x3f7ffffe, STR(__LINE__)); // 0x1p+0 / 0x1.000002p+0 = 0x1.fffffcp-1
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0xbf800001), 0x3f7ffffe, STR(__LINE__)); // -0x1p+0 / -0x1.000002p+0 = 0x1.fffffcp-1
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0x3f800001), 0xbf7ffffe, STR(__LINE__)); // -0x1p+0 / 0x1.000002p+0 = -0x1.fffffcp-1
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0xbf800001), 0xbf7ffffe, STR(__LINE__)); // 0x1p+0 / -0x1.000002p+0 = -0x1.fffffcp-1
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f800002), 0x3f7ffffc, STR(__LINE__)); // 0x1p+0 / 0x1.000004p+0 = 0x1.fffff8p-1
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f800003), 0x3f7ffffa, STR(__LINE__)); // 0x1p+0 / 0x1.000006p+0 = 0x1.fffff4p-1
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f800004), 0x3f7ffff8, STR(__LINE__)); // 0x1p+0 / 0x1.000008p+0 = 0x1.fffffp-1
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7ffffe), 0x3f800001, STR(__LINE__)); // 0x1p+0 / 0x1.fffffcp-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7ffffc), 0x3f800002, STR(__LINE__)); // 0x1p+0 / 0x1.fffff8p-1 = 0x1.000004p+0
+}
+
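+// f416 and following: quotients of operands close to 1.0, checking the correctly rounded last bit.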
+void f416(void) {
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7ffff8), 0x3f800004, STR(__LINE__)); // 0x1p+0 / 0x1.fffffp-1 = 0x1.000008p+0
+ comp32(single_of_bits(0x3f800001) / single_of_bits(0x3f800002), 0x3f7ffffe, STR(__LINE__)); // 0x1.000002p+0 / 0x1.000004p+0 = 0x1.fffffcp-1
+ comp32(single_of_bits(0x3f800001) / single_of_bits(0x3f800003), 0x3f7ffffc, STR(__LINE__)); // 0x1.000002p+0 / 0x1.000006p+0 = 0x1.fffff8p-1
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0x3f800003), 0x3f7ffffe, STR(__LINE__)); // 0x1.000004p+0 / 0x1.000006p+0 = 0x1.fffffcp-1
+ comp32(single_of_bits(0x3f800004) / single_of_bits(0x3f800007), 0x3f7ffffa, STR(__LINE__)); // 0x1.000008p+0 / 0x1.00000ep+0 = 0x1.fffff4p-1
+ comp32(single_of_bits(0x3f800006) / single_of_bits(0x3f800008), 0x3f7ffffc, STR(__LINE__)); // 0x1.00000cp+0 / 0x1.00001p+0 = 0x1.fffff8p-1
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f7ffffd), 0x3f800001, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.fffffap-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f7ffffc), 0x3f800001, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.fffff8p-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f7ffff9), 0x3f800003, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.fffff2p-1 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f7ffffd) / single_of_bits(0x3f7ffff9), 0x3f800002, STR(__LINE__)); // 0x1.fffffap-1 / 0x1.fffff2p-1 = 0x1.000004p+0
+}
+
+void f417(void) {
+ comp32(single_of_bits(0x3f7ffffb) / single_of_bits(0x3f7ffff9), 0x3f800001, STR(__LINE__)); // 0x1.fffff6p-1 / 0x1.fffff2p-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f800001) / single_of_bits(0x3f7ffffe), 0x3f800002, STR(__LINE__)); // 0x1.000002p+0 / 0x1.fffffcp-1 = 0x1.000004p+0
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0x3f7ffffe), 0x3f800003, STR(__LINE__)); // 0x1.000004p+0 / 0x1.fffffcp-1 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f800003) / single_of_bits(0x3f7ffffe), 0x3f800004, STR(__LINE__)); // 0x1.000006p+0 / 0x1.fffffcp-1 = 0x1.000008p+0
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0x3f7ffffc), 0x3f800004, STR(__LINE__)); // 0x1.000004p+0 / 0x1.fffff8p-1 = 0x1.000008p+0
+ comp32(single_of_bits(0x3f800004) / single_of_bits(0x3f7ffffe), 0x3f800005, STR(__LINE__)); // 0x1.000008p+0 / 0x1.fffffcp-1 = 0x1.00000ap+0
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f800001), 0x3f7ffffd, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.000002p+0 = 0x1.fffffap-1
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f800001), 0x3f7ffffc, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.000002p+0 = 0x1.fffff8p-1
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f800002), 0x3f7ffffb, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.000004p+0 = 0x1.fffff6p-1
+ comp32(single_of_bits(0x3f7ffffd) / single_of_bits(0x3f800001), 0x3f7ffffb, STR(__LINE__)); // 0x1.fffffap-1 / 0x1.000002p+0 = 0x1.fffff6p-1
+}
+
+void f418(void) {
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f800003), 0x3f7ffff9, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.000006p+0 = 0x1.fffff2p-1
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f800002), 0x3f7ffffa, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.000004p+0 = 0x1.fffff4p-1
+ comp32(single_of_bits(0x3f7ffffc) / single_of_bits(0x3f800001), 0x3f7ffffa, STR(__LINE__)); // 0x1.fffff8p-1 / 0x1.000002p+0 = 0x1.fffff4p-1
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f800004), 0x3f7ffff7, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.000008p+0 = 0x1.ffffeep-1
+ comp32(single_of_bits(0x3f7ffffd) / single_of_bits(0x3f800002), 0x3f7ffff9, STR(__LINE__)); // 0x1.fffffap-1 / 0x1.000004p+0 = 0x1.fffff2p-1
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f800003), 0x3f7ffff8, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.000006p+0 = 0x1.fffffp-1
+ comp32(single_of_bits(0xbf7ffffc) / single_of_bits(0xbf800001), 0x3f7ffffa, STR(__LINE__)); // -0x1.fffff8p-1 / -0x1.000002p+0 = 0x1.fffff4p-1
+ comp32(single_of_bits(0xbf7ffffc) / single_of_bits(0x3f800001), 0xbf7ffffa, STR(__LINE__)); // -0x1.fffff8p-1 / 0x1.000002p+0 = -0x1.fffff4p-1
+ comp32(single_of_bits(0x3f7ffffc) / single_of_bits(0xbf800001), 0xbf7ffffa, STR(__LINE__)); // 0x1.fffff8p-1 / -0x1.000002p+0 = -0x1.fffff4p-1
+ comp32(single_of_bits(0x3fbfffff) / single_of_bits(0x3f7ffffe), 0x3fc00001, STR(__LINE__)); // 0x1.7ffffep+0 / 0x1.fffffcp-1 = 0x1.800002p+0
+}
+
+void f419(void) {
+ comp32(single_of_bits(0xbfbfffff) / single_of_bits(0xbf7ffffe), 0x3fc00001, STR(__LINE__)); // -0x1.7ffffep+0 / -0x1.fffffcp-1 = 0x1.800002p+0
+ comp32(single_of_bits(0xbfbfffff) / single_of_bits(0x3f7ffffe), 0xbfc00001, STR(__LINE__)); // -0x1.7ffffep+0 / 0x1.fffffcp-1 = -0x1.800002p+0
+ comp32(single_of_bits(0x3fbfffff) / single_of_bits(0xbf7ffffe), 0xbfc00001, STR(__LINE__)); // 0x1.7ffffep+0 / -0x1.fffffcp-1 = -0x1.800002p+0
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x00800001), 0x3efffffe, STR(__LINE__)); // 0x1p-127 / 0x1.000002p-126 = 0x1.fffffcp-2
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x80800001), 0x3efffffe, STR(__LINE__)); // -0x1p-127 / -0x1.000002p-126 = 0x1.fffffcp-2
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x00800001), 0xbefffffe, STR(__LINE__)); // -0x1p-127 / 0x1.000002p-126 = -0x1.fffffcp-2
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x80800001), 0xbefffffe, STR(__LINE__)); // 0x1p-127 / -0x1.000002p-126 = -0x1.fffffcp-2
+ comp32(single_of_bits(0x40400002) / single_of_bits(0x3f800001), 0x40400000, STR(__LINE__)); // 0x1.800004p+1 / 0x1.000002p+0 = 0x1.8p+1
+ comp32(single_of_bits(0xc0400002) / single_of_bits(0xbf800001), 0x40400000, STR(__LINE__)); // -0x1.800004p+1 / -0x1.000002p+0 = 0x1.8p+1
+ comp32(single_of_bits(0x40400002) / single_of_bits(0xbf800001), 0xc0400000, STR(__LINE__)); // 0x1.800004p+1 / -0x1.000002p+0 = -0x1.8p+1
+}
+
+void f420(void) {
+ comp32(single_of_bits(0xc0400002) / single_of_bits(0x3f800001), 0xc0400000, STR(__LINE__)); // -0x1.800004p+1 / 0x1.000002p+0 = -0x1.8p+1
+ comp32(single_of_bits(0x00600001) / single_of_bits(0x00800001), 0x3f400000, STR(__LINE__)); // 0x1.800004p-127 / 0x1.000002p-126 = 0x1.8p-1
+ comp32(single_of_bits(0x80600001) / single_of_bits(0x80800001), 0x3f400000, STR(__LINE__)); // -0x1.800004p-127 / -0x1.000002p-126 = 0x1.8p-1
+ comp32(single_of_bits(0x00600001) / single_of_bits(0x80800001), 0xbf400000, STR(__LINE__)); // 0x1.800004p-127 / -0x1.000002p-126 = -0x1.8p-1
+ comp32(single_of_bits(0x80600001) / single_of_bits(0x00800001), 0xbf400000, STR(__LINE__)); // -0x1.800004p-127 / 0x1.000002p-126 = -0x1.8p-1
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7ffffd), 0x3f800002, STR(__LINE__)); // 0x1p+0 / 0x1.fffffap-1 = 0x1.000004p+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7ffffb), 0x3f800003, STR(__LINE__)); // 0x1p+0 / 0x1.fffff6p-1 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7ffff7), 0x3f800005, STR(__LINE__)); // 0x1p+0 / 0x1.ffffeep-1 = 0x1.00000ap+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0x3f7fffff), 0x3f800001, STR(__LINE__)); // 0x1p+0 / 0x1.fffffep-1 = 0x1.000002p+0
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0xbf7fffff), 0x3f800001, STR(__LINE__)); // -0x1p+0 / -0x1.fffffep-1 = 0x1.000002p+0
+}
+
+void f421(void) {
+ comp32(single_of_bits(0xbf800000) / single_of_bits(0x3f7fffff), 0xbf800001, STR(__LINE__)); // -0x1p+0 / 0x1.fffffep-1 = -0x1.000002p+0
+ comp32(single_of_bits(0x3f800000) / single_of_bits(0xbf7fffff), 0xbf800001, STR(__LINE__)); // 0x1p+0 / -0x1.fffffep-1 = -0x1.000002p+0
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f7ffffe), 0x3f800001, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.fffffcp-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f7ffffd), 0x3f800001, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.fffffap-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f7fffff) / single_of_bits(0x3f7ffffc), 0x3f800002, STR(__LINE__)); // 0x1.fffffep-1 / 0x1.fffff8p-1 = 0x1.000004p+0
+ comp32(single_of_bits(0x3f7ffffd) / single_of_bits(0x3f7ffffc), 0x3f800001, STR(__LINE__)); // 0x1.fffffap-1 / 0x1.fffff8p-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f7ffff9), 0x3f800003, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.fffff2p-1 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f7ffffc) / single_of_bits(0x3f7ffff9), 0x3f800002, STR(__LINE__)); // 0x1.fffff8p-1 / 0x1.fffff2p-1 = 0x1.000004p+0
+ comp32(single_of_bits(0x3f7ffffa) / single_of_bits(0x3f7ffff9), 0x3f800001, STR(__LINE__)); // 0x1.fffff4p-1 / 0x1.fffff2p-1 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f800001) / single_of_bits(0x3f7fffff), 0x3f800002, STR(__LINE__)); // 0x1.000002p+0 / 0x1.fffffep-1 = 0x1.000004p+0
+}
+
+void f422(void) {
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0x3f7fffff), 0x3f800003, STR(__LINE__)); // 0x1.000004p+0 / 0x1.fffffep-1 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f800001) / single_of_bits(0x3f7ffffd), 0x3f800003, STR(__LINE__)); // 0x1.000002p+0 / 0x1.fffffap-1 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f800003) / single_of_bits(0x3f7fffff), 0x3f800004, STR(__LINE__)); // 0x1.000006p+0 / 0x1.fffffep-1 = 0x1.000008p+0
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0x3f7ffffd), 0x3f800004, STR(__LINE__)); // 0x1.000004p+0 / 0x1.fffffap-1 = 0x1.000008p+0
+ comp32(single_of_bits(0x3f800003) / single_of_bits(0x3f7ffffd), 0x3f800005, STR(__LINE__)); // 0x1.000006p+0 / 0x1.fffffap-1 = 0x1.00000ap+0
+ comp32(single_of_bits(0x3f800001) / single_of_bits(0x3f7ffffb), 0x3f800004, STR(__LINE__)); // 0x1.000002p+0 / 0x1.fffff6p-1 = 0x1.000008p+0
+ comp32(single_of_bits(0x3f800005) / single_of_bits(0x3f7fffff), 0x3f800006, STR(__LINE__)); // 0x1.00000ap+0 / 0x1.fffffep-1 = 0x1.00000cp+0
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x00ffffff), 0x3e800001, STR(__LINE__)); // 0x1p-127 / 0x1.fffffep-126 = 0x1.000002p-2
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x80ffffff), 0x3e800001, STR(__LINE__)); // -0x1p-127 / -0x1.fffffep-126 = 0x1.000002p-2
+ comp32(single_of_bits(0x80400000) / single_of_bits(0x00ffffff), 0xbe800001, STR(__LINE__)); // -0x1p-127 / 0x1.fffffep-126 = -0x1.000002p-2
+}
+
+void f423(void) {
+ comp32(single_of_bits(0x00400000) / single_of_bits(0x80ffffff), 0xbe800001, STR(__LINE__)); // 0x1p-127 / -0x1.fffffep-126 = -0x1.000002p-2
+ comp32(single_of_bits(0x3fc00001) / single_of_bits(0x3f800001), 0x3fc00000, STR(__LINE__)); // 0x1.800002p+0 / 0x1.000002p+0 = 0x1.8p+0
+ comp32(single_of_bits(0xbfc00001) / single_of_bits(0xbf800001), 0x3fc00000, STR(__LINE__)); // -0x1.800002p+0 / -0x1.000002p+0 = 0x1.8p+0
+ comp32(single_of_bits(0xbfc00001) / single_of_bits(0x3f800001), 0xbfc00000, STR(__LINE__)); // -0x1.800002p+0 / 0x1.000002p+0 = -0x1.8p+0
+ comp32(single_of_bits(0x3fc00001) / single_of_bits(0xbf800001), 0xbfc00000, STR(__LINE__)); // 0x1.800002p+0 / -0x1.000002p+0 = -0x1.8p+0
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0x3f800001), 0x3f800001, STR(__LINE__)); // 0x1.000004p+0 / 0x1.000002p+0 = 0x1.000002p+0
+ comp32(single_of_bits(0xbf800002) / single_of_bits(0xbf800001), 0x3f800001, STR(__LINE__)); // -0x1.000004p+0 / -0x1.000002p+0 = 0x1.000002p+0
+ comp32(single_of_bits(0xbf800002) / single_of_bits(0x3f800001), 0xbf800001, STR(__LINE__)); // -0x1.000004p+0 / 0x1.000002p+0 = -0x1.000002p+0
+ comp32(single_of_bits(0x3f800002) / single_of_bits(0xbf800001), 0xbf800001, STR(__LINE__)); // 0x1.000004p+0 / -0x1.000002p+0 = -0x1.000002p+0
+ comp32(single_of_bits(0x3f800003) / single_of_bits(0x3f800001), 0x3f800002, STR(__LINE__)); // 0x1.000006p+0 / 0x1.000002p+0 = 0x1.000004p+0
+}
+
+void f424(void) {
+ comp32(single_of_bits(0x3f800004) / single_of_bits(0x3f800001), 0x3f800003, STR(__LINE__)); // 0x1.000008p+0 / 0x1.000002p+0 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f800007) / single_of_bits(0x3f800002), 0x3f800005, STR(__LINE__)); // 0x1.00000ep+0 / 0x1.000004p+0 = 0x1.00000ap+0
+ comp32(single_of_bits(0x3f800009) / single_of_bits(0x3f800008), 0x3f800001, STR(__LINE__)); // 0x1.000012p+0 / 0x1.00001p+0 = 0x1.000002p+0
+ comp32(single_of_bits(0x3f7ffffe) / single_of_bits(0x3f7fffff), 0x3f7fffff, STR(__LINE__)); // 0x1.fffffcp-1 / 0x1.fffffep-1 = 0x1.fffffep-1
+ comp32(single_of_bits(0x3f7ffffd) / single_of_bits(0x3f7fffff), 0x3f7ffffe, STR(__LINE__)); // 0x1.fffffap-1 / 0x1.fffffep-1 = 0x1.fffffcp-1
+ comp32(single_of_bits(0x3f7ffffd) / single_of_bits(0x3f7ffffe), 0x3f7fffff, STR(__LINE__)); // 0x1.fffffap-1 / 0x1.fffffcp-1 = 0x1.fffffep-1
+ comp32(single_of_bits(0x3f7ffffc) / single_of_bits(0x3f7fffff), 0x3f7ffffd, STR(__LINE__)); // 0x1.fffff8p-1 / 0x1.fffffep-1 = 0x1.fffffap-1
+ comp32(single_of_bits(0x3f7ffffc) / single_of_bits(0x3f7ffffe), 0x3f7ffffe, STR(__LINE__)); // 0x1.fffff8p-1 / 0x1.fffffcp-1 = 0x1.fffffcp-1
+ comp32(single_of_bits(0x3f7ffffc) / single_of_bits(0x3f7ffffd), 0x3f7fffff, STR(__LINE__)); // 0x1.fffff8p-1 / 0x1.fffffap-1 = 0x1.fffffep-1
+ comp32(single_of_bits(0x3f7ffff8) / single_of_bits(0x3f7ffffd), 0x3f7ffffb, STR(__LINE__)); // 0x1.fffffp-1 / 0x1.fffffap-1 = 0x1.fffff6p-1
+}
+
+void f425(void) {
+ comp32(single_of_bits(0x3f7ffff7) / single_of_bits(0x3f7ffffe), 0x3f7ffff9, STR(__LINE__)); // 0x1.ffffeep-1 / 0x1.fffffcp-1 = 0x1.fffff2p-1
+ comp32(single_of_bits(0x3f7ffff8) / single_of_bits(0x3f7ffffc), 0x3f7ffffc, STR(__LINE__)); // 0x1.fffffp-1 / 0x1.fffff8p-1 = 0x1.fffff8p-1
+ comp32(single_of_bits(0x3f7ffff7) / single_of_bits(0x3f7ffffb), 0x3f7ffffc, STR(__LINE__)); // 0x1.ffffeep-1 / 0x1.fffff6p-1 = 0x1.fffff8p-1
+ comp32(single_of_bits(0x007ffffe) / single_of_bits(0x00ffffff), 0x3efffffd, STR(__LINE__)); // 0x1.fffff8p-127 / 0x1.fffffep-126 = 0x1.fffffap-2
+ comp32(single_of_bits(0x807ffffe) / single_of_bits(0x80ffffff), 0x3efffffd, STR(__LINE__)); // -0x1.fffff8p-127 / -0x1.fffffep-126 = 0x1.fffffap-2
+ comp32(single_of_bits(0x807ffffe) / single_of_bits(0x00ffffff), 0xbefffffd, STR(__LINE__)); // -0x1.fffff8p-127 / 0x1.fffffep-126 = -0x1.fffffap-2
+ comp32(single_of_bits(0x007ffffe) / single_of_bits(0x80ffffff), 0xbefffffd, STR(__LINE__)); // 0x1.fffff8p-127 / -0x1.fffffep-126 = -0x1.fffffap-2
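+  // End of the division tests; single-precision multiplication tests start here.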
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x3f800000), 0x3f800000, STR(__LINE__)); // 0x1p+0 * 0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0xbf800000), 0x3f800000, STR(__LINE__)); // -0x1p+0 * -0x1p+0 = 0x1p+0
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x3f800000), 0xbf800000, STR(__LINE__)); // -0x1p+0 * 0x1p+0 = -0x1p+0
+}
+
+void f426(void) {
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0xbf800000), 0xbf800000, STR(__LINE__)); // 0x1p+0 * -0x1p+0 = -0x1p+0
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x40000000), 0x40000000, STR(__LINE__)); // 0x1p+0 * 0x1p+1 = 0x1p+1
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x3f800000), 0x40000000, STR(__LINE__)); // 0x1p+1 * 0x1p+0 = 0x1p+1
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0xc0000000), 0x40000000, STR(__LINE__)); // -0x1p+0 * -0x1p+1 = 0x1p+1
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xbf800000), 0x40000000, STR(__LINE__)); // -0x1p+1 * -0x1p+0 = 0x1p+1
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x40000000), 0xc0000000, STR(__LINE__)); // -0x1p+0 * 0x1p+1 = -0x1p+1
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xbf800000), 0xc0000000, STR(__LINE__)); // 0x1p+1 * -0x1p+0 = -0x1p+1
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0xc0000000), 0xc0000000, STR(__LINE__)); // 0x1p+0 * -0x1p+1 = -0x1p+1
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x3f800000), 0xc0000000, STR(__LINE__)); // -0x1p+1 * 0x1p+0 = -0x1p+1
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x40400000), 0x40c00000, STR(__LINE__)); // 0x1p+1 * 0x1.8p+1 = 0x1.8p+2
+}
+
+void f427(void) {
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x40000000), 0x40c00000, STR(__LINE__)); // 0x1.8p+1 * 0x1p+1 = 0x1.8p+2
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xc0400000), 0x40c00000, STR(__LINE__)); // -0x1p+1 * -0x1.8p+1 = 0x1.8p+2
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0xc0000000), 0x40c00000, STR(__LINE__)); // -0x1.8p+1 * -0x1p+1 = 0x1.8p+2
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x40400000), 0xc0c00000, STR(__LINE__)); // -0x1p+1 * 0x1.8p+1 = -0x1.8p+2
+ comp32(single_of_bits(0x40400000) * single_of_bits(0xc0000000), 0xc0c00000, STR(__LINE__)); // 0x1.8p+1 * -0x1p+1 = -0x1.8p+2
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xc0400000), 0xc0c00000, STR(__LINE__)); // 0x1p+1 * -0x1.8p+1 = -0x1.8p+2
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x40000000), 0xc0c00000, STR(__LINE__)); // -0x1.8p+1 * 0x1p+1 = -0x1.8p+2
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x40400000), 0x41100000, STR(__LINE__)); // 0x1.8p+1 * 0x1.8p+1 = 0x1.2p+3
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0xc0400000), 0x41100000, STR(__LINE__)); // -0x1.8p+1 * -0x1.8p+1 = 0x1.2p+3
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x40400000), 0xc1100000, STR(__LINE__)); // -0x1.8p+1 * 0x1.8p+1 = -0x1.2p+3
+}
+
+void f428(void) {
+ comp32(single_of_bits(0x40400000) * single_of_bits(0xc0400000), 0xc1100000, STR(__LINE__)); // 0x1.8p+1 * -0x1.8p+1 = -0x1.2p+3
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3f800000), 0x00000001, STR(__LINE__)); // 0x1p-149 * 0x1p+0 = 0x1p-149
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00000001), 0x00000001, STR(__LINE__)); // 0x1p+0 * 0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbf800000), 0x00000001, STR(__LINE__)); // -0x1p-149 * -0x1p+0 = 0x1p-149
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // -0x1p+0 * -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3f800000), 0x80000001, STR(__LINE__)); // -0x1p-149 * 0x1p+0 = -0x1p-149
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80000001), 0x80000001, STR(__LINE__)); // 0x1p+0 * -0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1p+0 * 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbf800000), 0x80000001, STR(__LINE__)); // 0x1p-149 * -0x1p+0 = -0x1p-149
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x3f800000), 0x00000002, STR(__LINE__)); // 0x1p-148 * 0x1p+0 = 0x1p-148
+}
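+// f429 and following: multiplying by +/-1.0 returns the other operand exactly, including subnormals.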
+
+void f429(void) {
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00000002), 0x00000002, STR(__LINE__)); // 0x1p+0 * 0x1p-148 = 0x1p-148
+ comp32(single_of_bits(0x80000002) * single_of_bits(0xbf800000), 0x00000002, STR(__LINE__)); // -0x1p-148 * -0x1p+0 = 0x1p-148
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80000002), 0x00000002, STR(__LINE__)); // -0x1p+0 * -0x1p-148 = 0x1p-148
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x3f800000), 0x80000002, STR(__LINE__)); // -0x1p-148 * 0x1p+0 = -0x1p-148
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80000002), 0x80000002, STR(__LINE__)); // 0x1p+0 * -0x1p-148 = -0x1p-148
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00000002), 0x80000002, STR(__LINE__)); // -0x1p+0 * 0x1p-148 = -0x1p-148
+ comp32(single_of_bits(0x00000002) * single_of_bits(0xbf800000), 0x80000002, STR(__LINE__)); // 0x1p-148 * -0x1p+0 = -0x1p-148
+ comp32(single_of_bits(0x00000004) * single_of_bits(0x3f800000), 0x00000004, STR(__LINE__)); // 0x1p-147 * 0x1p+0 = 0x1p-147
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00000004), 0x00000004, STR(__LINE__)); // 0x1p+0 * 0x1p-147 = 0x1p-147
+ comp32(single_of_bits(0x80000004) * single_of_bits(0xbf800000), 0x00000004, STR(__LINE__)); // -0x1p-147 * -0x1p+0 = 0x1p-147
+}
+
+void f430(void) {
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80000004), 0x00000004, STR(__LINE__)); // -0x1p+0 * -0x1p-147 = 0x1p-147
+ comp32(single_of_bits(0x80000004) * single_of_bits(0x3f800000), 0x80000004, STR(__LINE__)); // -0x1p-147 * 0x1p+0 = -0x1p-147
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80000004), 0x80000004, STR(__LINE__)); // 0x1p+0 * -0x1p-147 = -0x1p-147
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00000004), 0x80000004, STR(__LINE__)); // -0x1p+0 * 0x1p-147 = -0x1p-147
+ comp32(single_of_bits(0x00000004) * single_of_bits(0xbf800000), 0x80000004, STR(__LINE__)); // 0x1p-147 * -0x1p+0 = -0x1p-147
+ comp32(single_of_bits(0x00fffffe) * single_of_bits(0x3f800000), 0x00fffffe, STR(__LINE__)); // 0x1.fffffcp-126 * 0x1p+0 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00fffffe), 0x00fffffe, STR(__LINE__)); // 0x1p+0 * 0x1.fffffcp-126 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x80fffffe) * single_of_bits(0xbf800000), 0x00fffffe, STR(__LINE__)); // -0x1.fffffcp-126 * -0x1p+0 = 0x1.fffffcp-126
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80fffffe), 0x00fffffe, STR(__LINE__)); // -0x1p+0 * -0x1.fffffcp-126 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x80fffffe) * single_of_bits(0x3f800000), 0x80fffffe, STR(__LINE__)); // -0x1.fffffcp-126 * 0x1p+0 = -0x1.fffffcp-126
+}
+
+void f431(void) {
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80fffffe), 0x80fffffe, STR(__LINE__)); // 0x1p+0 * -0x1.fffffcp-126 = -0x1.fffffcp-126
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00fffffe), 0x80fffffe, STR(__LINE__)); // -0x1p+0 * 0x1.fffffcp-126 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x00fffffe) * single_of_bits(0xbf800000), 0x80fffffe, STR(__LINE__)); // 0x1.fffffcp-126 * -0x1p+0 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80000009), 0x80000009, STR(__LINE__)); // 0x1p+0 * -0x1.2p-146 = -0x1.2p-146
+ comp32(single_of_bits(0x80000009) * single_of_bits(0x3f800000), 0x80000009, STR(__LINE__)); // -0x1.2p-146 * 0x1p+0 = -0x1.2p-146
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00000009), 0x80000009, STR(__LINE__)); // -0x1p+0 * 0x1.2p-146 = -0x1.2p-146
+ comp32(single_of_bits(0x00000009) * single_of_bits(0xbf800000), 0x80000009, STR(__LINE__)); // 0x1.2p-146 * -0x1p+0 = -0x1.2p-146
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80000009), 0x00000009, STR(__LINE__)); // -0x1p+0 * -0x1.2p-146 = 0x1.2p-146
+ comp32(single_of_bits(0x80000009) * single_of_bits(0xbf800000), 0x00000009, STR(__LINE__)); // -0x1.2p-146 * -0x1p+0 = 0x1.2p-146
+ comp32(single_of_bits(0x00000009) * single_of_bits(0x3f800000), 0x00000009, STR(__LINE__)); // 0x1.2p-146 * 0x1p+0 = 0x1.2p-146
+}
+
+void f432(void) {
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00000009), 0x00000009, STR(__LINE__)); // 0x1p+0 * 0x1.2p-146 = 0x1.2p-146
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x807fffff), 0x807fffff, STR(__LINE__)); // 0x1p+0 * -0x1.fffffcp-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x3f800000), 0x807fffff, STR(__LINE__)); // -0x1.fffffcp-127 * 0x1p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x007fffff), 0x807fffff, STR(__LINE__)); // -0x1p+0 * 0x1.fffffcp-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0xbf800000), 0x807fffff, STR(__LINE__)); // 0x1.fffffcp-127 * -0x1p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x807fffff), 0x007fffff, STR(__LINE__)); // -0x1p+0 * -0x1.fffffcp-127 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0xbf800000), 0x007fffff, STR(__LINE__)); // -0x1.fffffcp-127 * -0x1p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x3f800000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-127 * 0x1p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x007fffff), 0x007fffff, STR(__LINE__)); // 0x1p+0 * 0x1.fffffcp-127 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80800001), 0x80800001, STR(__LINE__)); // 0x1p+0 * -0x1.000002p-126 = -0x1.000002p-126
+}
+
+void f433(void) {
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x3f800000), 0x80800001, STR(__LINE__)); // -0x1.000002p-126 * 0x1p+0 = -0x1.000002p-126
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00800001), 0x80800001, STR(__LINE__)); // -0x1p+0 * 0x1.000002p-126 = -0x1.000002p-126
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xbf800000), 0x80800001, STR(__LINE__)); // 0x1.000002p-126 * -0x1p+0 = -0x1.000002p-126
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80800001), 0x00800001, STR(__LINE__)); // -0x1p+0 * -0x1.000002p-126 = 0x1.000002p-126
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xbf800000), 0x00800001, STR(__LINE__)); // -0x1.000002p-126 * -0x1p+0 = 0x1.000002p-126
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x3f800000), 0x00800001, STR(__LINE__)); // 0x1.000002p-126 * 0x1p+0 = 0x1.000002p-126
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00800001), 0x00800001, STR(__LINE__)); // 0x1p+0 * 0x1.000002p-126 = 0x1.000002p-126
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x01000003), 0x01000003, STR(__LINE__)); // 0x1p+0 * 0x1.000006p-125 = 0x1.000006p-125
+ comp32(single_of_bits(0x01000003) * single_of_bits(0x3f800000), 0x01000003, STR(__LINE__)); // 0x1.000006p-125 * 0x1p+0 = 0x1.000006p-125
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00800009), 0x80800009, STR(__LINE__)); // -0x1p+0 * 0x1.000012p-126 = -0x1.000012p-126
+}
+
+void f434(void) {
+ comp32(single_of_bits(0x00800009) * single_of_bits(0xbf800000), 0x80800009, STR(__LINE__)); // 0x1.000012p-126 * -0x1p+0 = -0x1.000012p-126
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x007ffffd), 0x007ffffd, STR(__LINE__)); // 0x1p+0 * 0x1.fffff4p-127 = 0x1.fffff4p-127
+ comp32(single_of_bits(0x007ffffd) * single_of_bits(0x3f800000), 0x007ffffd, STR(__LINE__)); // 0x1.fffff4p-127 * 0x1p+0 = 0x1.fffff4p-127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7effffff), 0x7f7fffff, STR(__LINE__)); // 0x1p+1 * 0x1.fffffep+126 = 0x1.fffffep+127
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0x40000000), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+126 * 0x1p+1 = 0x1.fffffep+127
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xfeffffff), 0x7f7fffff, STR(__LINE__)); // -0x1p+1 * -0x1.fffffep+126 = 0x1.fffffep+127
+ comp32(single_of_bits(0xfeffffff) * single_of_bits(0xc0000000), 0x7f7fffff, STR(__LINE__)); // -0x1.fffffep+126 * -0x1p+1 = 0x1.fffffep+127
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0xc0000000), 0xff7fffff, STR(__LINE__)); // 0x1.fffffep+126 * -0x1p+1 = -0x1.fffffep+127
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7effffff), 0xff7fffff, STR(__LINE__)); // -0x1p+1 * 0x1.fffffep+126 = -0x1.fffffep+127
+ comp32(single_of_bits(0xfeffffff) * single_of_bits(0x40000000), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+126 * 0x1p+1 = -0x1.fffffep+127
+}
+
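+// f435 and following: exact scaling by small powers of two near the extremes of the exponent range.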
+void f435(void) {
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xfeffffff), 0xff7fffff, STR(__LINE__)); // 0x1p+1 * -0x1.fffffep+126 = -0x1.fffffep+127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7e800003), 0x7f000003, STR(__LINE__)); // 0x1p+1 * 0x1.000006p+126 = 0x1.000006p+127
+ comp32(single_of_bits(0x7e800003) * single_of_bits(0x40000000), 0x7f000003, STR(__LINE__)); // 0x1.000006p+126 * 0x1p+1 = 0x1.000006p+127
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xfe800003), 0x7f000003, STR(__LINE__)); // -0x1p+1 * -0x1.000006p+126 = 0x1.000006p+127
+ comp32(single_of_bits(0xfe800003) * single_of_bits(0xc0000000), 0x7f000003, STR(__LINE__)); // -0x1.000006p+126 * -0x1p+1 = 0x1.000006p+127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xfe800003), 0xff000003, STR(__LINE__)); // 0x1p+1 * -0x1.000006p+126 = -0x1.000006p+127
+ comp32(single_of_bits(0xfe800003) * single_of_bits(0x40000000), 0xff000003, STR(__LINE__)); // -0x1.000006p+126 * 0x1p+1 = -0x1.000006p+127
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7e800003), 0xff000003, STR(__LINE__)); // -0x1p+1 * 0x1.000006p+126 = -0x1.000006p+127
+ comp32(single_of_bits(0x7e800003) * single_of_bits(0xc0000000), 0xff000003, STR(__LINE__)); // 0x1.000006p+126 * -0x1p+1 = -0x1.000006p+127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7e800001), 0x7f000001, STR(__LINE__)); // 0x1p+1 * 0x1.000002p+126 = 0x1.000002p+127
+}
+
+void f436(void) {
+ comp32(single_of_bits(0x7e800001) * single_of_bits(0x40000000), 0x7f000001, STR(__LINE__)); // 0x1.000002p+126 * 0x1p+1 = 0x1.000002p+127
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xfe800001), 0x7f000001, STR(__LINE__)); // -0x1p+1 * -0x1.000002p+126 = 0x1.000002p+127
+ comp32(single_of_bits(0xfe800001) * single_of_bits(0xc0000000), 0x7f000001, STR(__LINE__)); // -0x1.000002p+126 * -0x1p+1 = 0x1.000002p+127
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7e800001), 0xff000001, STR(__LINE__)); // -0x1p+1 * 0x1.000002p+126 = -0x1.000002p+127
+ comp32(single_of_bits(0x7e800001) * single_of_bits(0xc0000000), 0xff000001, STR(__LINE__)); // 0x1.000002p+126 * -0x1p+1 = -0x1.000002p+127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xfe800001), 0xff000001, STR(__LINE__)); // 0x1p+1 * -0x1.000002p+126 = -0x1.000002p+127
+ comp32(single_of_bits(0xfe800001) * single_of_bits(0x40000000), 0xff000001, STR(__LINE__)); // -0x1.000002p+126 * 0x1p+1 = -0x1.000002p+127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7e800000), 0x7f000000, STR(__LINE__)); // 0x1p+1 * 0x1p+126 = 0x1p+127
+ comp32(single_of_bits(0x7e800000) * single_of_bits(0x40000000), 0x7f000000, STR(__LINE__)); // 0x1p+126 * 0x1p+1 = 0x1p+127
+ comp32(single_of_bits(0x7e800000) * single_of_bits(0xc0000000), 0xff000000, STR(__LINE__)); // 0x1p+126 * -0x1p+1 = -0x1p+127
+}
+
+void f437(void) {
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7e800000), 0xff000000, STR(__LINE__)); // -0x1p+1 * 0x1p+126 = -0x1p+127
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7e7fffff), 0x7effffff, STR(__LINE__)); // 0x1p+1 * 0x1.fffffep+125 = 0x1.fffffep+126
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0x40000000), 0x7effffff, STR(__LINE__)); // 0x1.fffffep+125 * 0x1p+1 = 0x1.fffffep+126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xfe7fffff), 0x7effffff, STR(__LINE__)); // -0x1p+1 * -0x1.fffffep+125 = 0x1.fffffep+126
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0xc0000000), 0x7effffff, STR(__LINE__)); // -0x1.fffffep+125 * -0x1p+1 = 0x1.fffffep+126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7e7fffff), 0xfeffffff, STR(__LINE__)); // -0x1p+1 * 0x1.fffffep+125 = -0x1.fffffep+126
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0xc0000000), 0xfeffffff, STR(__LINE__)); // 0x1.fffffep+125 * -0x1p+1 = -0x1.fffffep+126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xfe7fffff), 0xfeffffff, STR(__LINE__)); // 0x1p+1 * -0x1.fffffep+125 = -0x1.fffffep+126
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0x40000000), 0xfeffffff, STR(__LINE__)); // -0x1.fffffep+125 * 0x1p+1 = -0x1.fffffep+126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7e7ffffd), 0x7efffffd, STR(__LINE__)); // 0x1p+1 * 0x1.fffffap+125 = 0x1.fffffap+126
+}
+
+void f438(void) {
+ comp32(single_of_bits(0x7e7ffffd) * single_of_bits(0x40000000), 0x7efffffd, STR(__LINE__)); // 0x1.fffffap+125 * 0x1p+1 = 0x1.fffffap+126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0xfe7ffffd), 0x7efffffd, STR(__LINE__)); // -0x1p+1 * -0x1.fffffap+125 = 0x1.fffffap+126
+ comp32(single_of_bits(0xfe7ffffd) * single_of_bits(0xc0000000), 0x7efffffd, STR(__LINE__)); // -0x1.fffffap+125 * -0x1p+1 = 0x1.fffffap+126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0xfe7ffffd), 0xfefffffd, STR(__LINE__)); // 0x1p+1 * -0x1.fffffap+125 = -0x1.fffffap+126
+ comp32(single_of_bits(0xfe7ffffd) * single_of_bits(0x40000000), 0xfefffffd, STR(__LINE__)); // -0x1.fffffap+125 * 0x1p+1 = -0x1.fffffap+126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7e7ffffd), 0xfefffffd, STR(__LINE__)); // -0x1p+1 * 0x1.fffffap+125 = -0x1.fffffap+126
+ comp32(single_of_bits(0x7e7ffffd) * single_of_bits(0xc0000000), 0xfefffffd, STR(__LINE__)); // 0x1.fffffap+125 * -0x1p+1 = -0x1.fffffap+126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00800000), 0x01000000, STR(__LINE__)); // 0x1p+1 * 0x1p-126 = 0x1p-125
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x40000000), 0x01000000, STR(__LINE__)); // 0x1p-126 * 0x1p+1 = 0x1p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80800000), 0x01000000, STR(__LINE__)); // -0x1p+1 * -0x1p-126 = 0x1p-125
+}
+
+void f439(void) {
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xc0000000), 0x01000000, STR(__LINE__)); // -0x1p-126 * -0x1p+1 = 0x1p-125
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xc0000000), 0x81000000, STR(__LINE__)); // 0x1p-126 * -0x1p+1 = -0x1p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00800000), 0x81000000, STR(__LINE__)); // -0x1p+1 * 0x1p-126 = -0x1p-125
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x40000000), 0x81000000, STR(__LINE__)); // -0x1p-126 * 0x1p+1 = -0x1p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80800000), 0x81000000, STR(__LINE__)); // 0x1p+1 * -0x1p-126 = -0x1p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00800001), 0x01000001, STR(__LINE__)); // 0x1p+1 * 0x1.000002p-126 = 0x1.000002p-125
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x40000000), 0x01000001, STR(__LINE__)); // 0x1.000002p-126 * 0x1p+1 = 0x1.000002p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80800001), 0x01000001, STR(__LINE__)); // -0x1p+1 * -0x1.000002p-126 = 0x1.000002p-125
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xc0000000), 0x01000001, STR(__LINE__)); // -0x1.000002p-126 * -0x1p+1 = 0x1.000002p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00800001), 0x81000001, STR(__LINE__)); // -0x1p+1 * 0x1.000002p-126 = -0x1.000002p-125
+}
+
+void f440(void) {
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xc0000000), 0x81000001, STR(__LINE__)); // 0x1.000002p-126 * -0x1p+1 = -0x1.000002p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80800001), 0x81000001, STR(__LINE__)); // 0x1p+1 * -0x1.000002p-126 = -0x1.000002p-125
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x40000000), 0x81000001, STR(__LINE__)); // -0x1.000002p-126 * 0x1p+1 = -0x1.000002p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00800003), 0x01000003, STR(__LINE__)); // 0x1p+1 * 0x1.000006p-126 = 0x1.000006p-125
+ comp32(single_of_bits(0x00800003) * single_of_bits(0x40000000), 0x01000003, STR(__LINE__)); // 0x1.000006p-126 * 0x1p+1 = 0x1.000006p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80800003), 0x01000003, STR(__LINE__)); // -0x1p+1 * -0x1.000006p-126 = 0x1.000006p-125
+ comp32(single_of_bits(0x80800003) * single_of_bits(0xc0000000), 0x01000003, STR(__LINE__)); // -0x1.000006p-126 * -0x1p+1 = 0x1.000006p-125
+ comp32(single_of_bits(0x80800003) * single_of_bits(0xc0000000), 0x01000003, STR(__LINE__)); // -0x1.000006p-126 * -0x1p+1 = 0x1.000006p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80800003), 0x01000003, STR(__LINE__)); // -0x1p+1 * -0x1.000006p-126 = 0x1.000006p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80800003), 0x81000003, STR(__LINE__)); // 0x1p+1 * -0x1.000006p-126 = -0x1.000006p-125
+}
+
+void f441(void) {
+ comp32(single_of_bits(0x80800003) * single_of_bits(0x40000000), 0x81000003, STR(__LINE__)); // -0x1.000006p-126 * 0x1p+1 = -0x1.000006p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00800003), 0x81000003, STR(__LINE__)); // -0x1p+1 * 0x1.000006p-126 = -0x1.000006p-125
+ comp32(single_of_bits(0x00800003) * single_of_bits(0xc0000000), 0x81000003, STR(__LINE__)); // 0x1.000006p-126 * -0x1p+1 = -0x1.000006p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00800005), 0x01000005, STR(__LINE__)); // 0x1p+1 * 0x1.00000ap-126 = 0x1.00000ap-125
+ comp32(single_of_bits(0x00800005) * single_of_bits(0x40000000), 0x01000005, STR(__LINE__)); // 0x1.00000ap-126 * 0x1p+1 = 0x1.00000ap-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80800005), 0x01000005, STR(__LINE__)); // -0x1p+1 * -0x1.00000ap-126 = 0x1.00000ap-125
+ comp32(single_of_bits(0x80800005) * single_of_bits(0xc0000000), 0x01000005, STR(__LINE__)); // -0x1.00000ap-126 * -0x1p+1 = 0x1.00000ap-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80800005), 0x81000005, STR(__LINE__)); // 0x1p+1 * -0x1.00000ap-126 = -0x1.00000ap-125
+ comp32(single_of_bits(0x80800005) * single_of_bits(0x40000000), 0x81000005, STR(__LINE__)); // -0x1.00000ap-126 * 0x1p+1 = -0x1.00000ap-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00800005), 0x81000005, STR(__LINE__)); // -0x1p+1 * 0x1.00000ap-126 = -0x1.00000ap-125
+}
+
+void f442(void) {
+ comp32(single_of_bits(0x00800005) * single_of_bits(0xc0000000), 0x81000005, STR(__LINE__)); // 0x1.00000ap-126 * -0x1p+1 = -0x1.00000ap-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00800009), 0x01000009, STR(__LINE__)); // 0x1p+1 * 0x1.000012p-126 = 0x1.000012p-125
+ comp32(single_of_bits(0x00800009) * single_of_bits(0x40000000), 0x01000009, STR(__LINE__)); // 0x1.000012p-126 * 0x1p+1 = 0x1.000012p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80800009), 0x01000009, STR(__LINE__)); // -0x1p+1 * -0x1.000012p-126 = 0x1.000012p-125
+ comp32(single_of_bits(0x80800009) * single_of_bits(0xc0000000), 0x01000009, STR(__LINE__)); // -0x1.000012p-126 * -0x1p+1 = 0x1.000012p-125
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00800009), 0x81000009, STR(__LINE__)); // -0x1p+1 * 0x1.000012p-126 = -0x1.000012p-125
+ comp32(single_of_bits(0x00800009) * single_of_bits(0xc0000000), 0x81000009, STR(__LINE__)); // 0x1.000012p-126 * -0x1p+1 = -0x1.000012p-125
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80800009), 0x81000009, STR(__LINE__)); // 0x1p+1 * -0x1.000012p-126 = -0x1.000012p-125
+ comp32(single_of_bits(0x80800009) * single_of_bits(0x40000000), 0x81000009, STR(__LINE__)); // -0x1.000012p-126 * 0x1p+1 = -0x1.000012p-125
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x7e7fffff), 0x7f7fffff, STR(__LINE__)); // 0x1p+2 * 0x1.fffffep+125 = 0x1.fffffep+127
+}
+
+void f443(void) {
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0x40800000), 0x7f7fffff, STR(__LINE__)); // 0x1.fffffep+125 * 0x1p+2 = 0x1.fffffep+127
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x7e7fffff), 0xff7fffff, STR(__LINE__)); // -0x1p+2 * 0x1.fffffep+125 = -0x1.fffffep+127
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0xc0800000), 0xff7fffff, STR(__LINE__)); // 0x1.fffffep+125 * -0x1p+2 = -0x1.fffffep+127
+ comp32(single_of_bits(0x40800000) * single_of_bits(0xfe7fffff), 0xff7fffff, STR(__LINE__)); // 0x1p+2 * -0x1.fffffep+125 = -0x1.fffffep+127
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0x40800000), 0xff7fffff, STR(__LINE__)); // -0x1.fffffep+125 * 0x1p+2 = -0x1.fffffep+127
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0xfe7fffff), 0x7f7fffff, STR(__LINE__)); // -0x1p+2 * -0x1.fffffep+125 = 0x1.fffffep+127
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0xc0800000), 0x7f7fffff, STR(__LINE__)); // -0x1.fffffep+125 * -0x1p+2 = 0x1.fffffep+127
+ comp32(single_of_bits(0x7e7ffffd) * single_of_bits(0x40800000), 0x7f7ffffd, STR(__LINE__)); // 0x1.fffffap+125 * 0x1p+2 = 0x1.fffffap+127
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x7e7ffffd), 0x7f7ffffd, STR(__LINE__)); // 0x1p+2 * 0x1.fffffap+125 = 0x1.fffffap+127
+ comp32(single_of_bits(0x7e7ffffd) * single_of_bits(0xc0800000), 0xff7ffffd, STR(__LINE__)); // 0x1.fffffap+125 * -0x1p+2 = -0x1.fffffap+127
+}
+
+void f444(void) {
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x7e7ffffd), 0xff7ffffd, STR(__LINE__)); // -0x1p+2 * 0x1.fffffap+125 = -0x1.fffffap+127
+ comp32(single_of_bits(0xfe7ffffd) * single_of_bits(0x40800000), 0xff7ffffd, STR(__LINE__)); // -0x1.fffffap+125 * 0x1p+2 = -0x1.fffffap+127
+ comp32(single_of_bits(0x40800000) * single_of_bits(0xfe7ffffd), 0xff7ffffd, STR(__LINE__)); // 0x1p+2 * -0x1.fffffap+125 = -0x1.fffffap+127
+ comp32(single_of_bits(0xfe7ffffd) * single_of_bits(0xc0800000), 0x7f7ffffd, STR(__LINE__)); // -0x1.fffffap+125 * -0x1p+2 = 0x1.fffffap+127
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0xfe7ffffd), 0x7f7ffffd, STR(__LINE__)); // -0x1p+2 * -0x1.fffffap+125 = 0x1.fffffap+127
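+  // Products of exact powers of two across a range of exponents.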
+ comp32(single_of_bits(0x3b000000) * single_of_bits(0x43800000), 0x3f000000, STR(__LINE__)); // 0x1p-9 * 0x1p+8 = 0x1p-1
+ comp32(single_of_bits(0x43800000) * single_of_bits(0x3b000000), 0x3f000000, STR(__LINE__)); // 0x1p+8 * 0x1p-9 = 0x1p-1
+ comp32(single_of_bits(0xbb000000) * single_of_bits(0xc3800000), 0x3f000000, STR(__LINE__)); // -0x1p-9 * -0x1p+8 = 0x1p-1
+ comp32(single_of_bits(0xc3800000) * single_of_bits(0xbb000000), 0x3f000000, STR(__LINE__)); // -0x1p+8 * -0x1p-9 = 0x1p-1
+ comp32(single_of_bits(0xbb000000) * single_of_bits(0x43800000), 0xbf000000, STR(__LINE__)); // -0x1p-9 * 0x1p+8 = -0x1p-1
+}
+
+void f445(void) {
+ comp32(single_of_bits(0x43800000) * single_of_bits(0xbb000000), 0xbf000000, STR(__LINE__)); // 0x1p+8 * -0x1p-9 = -0x1p-1
+ comp32(single_of_bits(0xc3800000) * single_of_bits(0x3b000000), 0xbf000000, STR(__LINE__)); // -0x1p+8 * 0x1p-9 = -0x1p-1
+ comp32(single_of_bits(0x3b000000) * single_of_bits(0xc3800000), 0xbf000000, STR(__LINE__)); // 0x1p-9 * -0x1p+8 = -0x1p-1
+ comp32(single_of_bits(0x3e000000) * single_of_bits(0x3c800000), 0x3b000000, STR(__LINE__)); // 0x1p-3 * 0x1p-6 = 0x1p-9
+ comp32(single_of_bits(0x3c800000) * single_of_bits(0x3e000000), 0x3b000000, STR(__LINE__)); // 0x1p-6 * 0x1p-3 = 0x1p-9
+ comp32(single_of_bits(0xbe000000) * single_of_bits(0xbc800000), 0x3b000000, STR(__LINE__)); // -0x1p-3 * -0x1p-6 = 0x1p-9
+ comp32(single_of_bits(0xbc800000) * single_of_bits(0xbe000000), 0x3b000000, STR(__LINE__)); // -0x1p-6 * -0x1p-3 = 0x1p-9
+ comp32(single_of_bits(0xbe000000) * single_of_bits(0x3c800000), 0xbb000000, STR(__LINE__)); // -0x1p-3 * 0x1p-6 = -0x1p-9
+ comp32(single_of_bits(0x3c800000) * single_of_bits(0xbe000000), 0xbb000000, STR(__LINE__)); // 0x1p-6 * -0x1p-3 = -0x1p-9
+ comp32(single_of_bits(0xbc800000) * single_of_bits(0x3e000000), 0xbb000000, STR(__LINE__)); // -0x1p-6 * 0x1p-3 = -0x1p-9
+}
+
+void f446(void) {
+ comp32(single_of_bits(0x3e000000) * single_of_bits(0xbc800000), 0xbb000000, STR(__LINE__)); // 0x1p-3 * -0x1p-6 = -0x1p-9
+ comp32(single_of_bits(0x41000000) * single_of_bits(0x44000000), 0x45800000, STR(__LINE__)); // 0x1p+3 * 0x1p+9 = 0x1p+12
+ comp32(single_of_bits(0x44000000) * single_of_bits(0x41000000), 0x45800000, STR(__LINE__)); // 0x1p+9 * 0x1p+3 = 0x1p+12
+ comp32(single_of_bits(0xc1000000) * single_of_bits(0xc4000000), 0x45800000, STR(__LINE__)); // -0x1p+3 * -0x1p+9 = 0x1p+12
+ comp32(single_of_bits(0xc4000000) * single_of_bits(0xc1000000), 0x45800000, STR(__LINE__)); // -0x1p+9 * -0x1p+3 = 0x1p+12
+ comp32(single_of_bits(0xc1000000) * single_of_bits(0x44000000), 0xc5800000, STR(__LINE__)); // -0x1p+3 * 0x1p+9 = -0x1p+12
+ comp32(single_of_bits(0x44000000) * single_of_bits(0xc1000000), 0xc5800000, STR(__LINE__)); // 0x1p+9 * -0x1p+3 = -0x1p+12
+ comp32(single_of_bits(0xc4000000) * single_of_bits(0x41000000), 0xc5800000, STR(__LINE__)); // -0x1p+9 * 0x1p+3 = -0x1p+12
+ comp32(single_of_bits(0x41000000) * single_of_bits(0xc4000000), 0xc5800000, STR(__LINE__)); // 0x1p+3 * -0x1p+9 = -0x1p+12
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x0p+0 = 0x0p+0
+}
+
+void f447(void) {
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-149 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p-149 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-149 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80000001), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00000001), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p-149 = -0x0p+0
+}
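+// The zero-result cases above follow the IEEE 754 sign rule for products: the
+// sign of the result is the exclusive OR of the operand signs, even when the
+// product is (or underflows to) zero, e.g. -0x0p+0 * 0x1p-149 = -0x0p+0.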
+
+void f448(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p-149 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00000002), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-148 = 0x0p+0
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-148 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80000002), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-148 = 0x0p+0
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p-148 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80000002), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p-148 = -0x0p+0
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-148 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00000002), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p-148 = -0x0p+0
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p-148 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000003) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.8p-148 * 0x0p+0 = 0x0p+0
+}
+
+void f449(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00000003), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.8p-148 = 0x0p+0
+ comp32(single_of_bits(0x80000003) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.8p-148 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80000003), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.8p-148 = 0x0p+0
+ comp32(single_of_bits(0x80000003) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.8p-148 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80000003), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.8p-148 = -0x0p+0
+ comp32(single_of_bits(0x00000003) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.8p-148 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00000003), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.8p-148 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00000004), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-147 = 0x0p+0
+ comp32(single_of_bits(0x00000004) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-147 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80000004), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-147 = 0x0p+0
+}
+
+void f450(void) {
+ comp32(single_of_bits(0x80000004) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p-147 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000004) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p-147 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00000004), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p-147 = -0x0p+0
+ comp32(single_of_bits(0x80000004) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-147 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80000004), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p-147 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x007fffff), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.fffffcp-127 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x807fffff), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.fffffcp-127 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.fffffcp-127 * 0x0p+0 = -0x0p+0
+}
+
+void f451(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x807fffff), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffcp-127 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x3f800000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+0 = 0x0p+0
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p+0 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xbf800000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+0 = 0x0p+0
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p+0 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x3f800000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p+0 = -0x0p+0
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p+0 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p+0 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xbf800000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p+0 = -0x0p+0
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p+1 * 0x0p+0 = -0x0p+0
+}
+
+void f452(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc0000000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p+1 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x40400000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.8p+1 = 0x0p+0
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.8p+1 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xc0400000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.8p+1 = 0x0p+0
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.8p+1 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc0400000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.8p+1 = -0x0p+0
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.8p+1 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x40400000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.8p+1 = -0x0p+0
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.8p+1 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p+2 * 0x0p+0 = 0x0p+0
+}
+
+void f453(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x40800000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p+2 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xc0800000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p+2 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc0800000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p+2 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x40800000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p+2 = -0x0p+0
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p+2 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.4p+2 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x40a00000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.4p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0a00000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.4p+2 * -0x0p+0 = 0x0p+0
+}
+
+void f454(void) {
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xc0a00000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.4p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0a00000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.4p+2 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc0a00000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.4p+2 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x40a00000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.4p+2 = -0x0p+0
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.4p+2 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x40c00000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.8p+2 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x40c00000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.8p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0c00000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.8p+2 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xc0c00000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.8p+2 = 0x0p+0
+ comp32(single_of_bits(0xc0c00000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.8p+2 * 0x0p+0 = -0x0p+0
+}
+
+void f455(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc0c00000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.8p+2 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x40c00000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.8p+2 = -0x0p+0
+ comp32(single_of_bits(0x40c00000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.8p+2 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.cp+2 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x40e00000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.cp+2 = 0x0p+0
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.cp+2 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xc0e00000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.cp+2 = 0x0p+0
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.cp+2 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc0e00000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.cp+2 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x40e00000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.cp+2 = -0x0p+0
+}
+
+void f456(void) {
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.cp+2 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x41000000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p+3 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x41000000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+3 = 0x0p+0
+ comp32(single_of_bits(0xc1000000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p+3 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xc1000000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+3 = 0x0p+0
+ comp32(single_of_bits(0xc1000000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p+3 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xc1000000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p+3 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x41000000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p+3 = -0x0p+0
+ comp32(single_of_bits(0x41000000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p+3 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p+127 * 0x0p+0 = 0x0p+0
+}
+
+void f457(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7f000000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+127 = 0x0p+0
+ comp32(single_of_bits(0xff000000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p+127 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xff000000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+127 = 0x0p+0
+ comp32(single_of_bits(0xff000000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p+127 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xff000000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p+127 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7f000000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p+127 = -0x0p+0
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p+127 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x7e800000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p+126 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7e800000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p+126 = 0x0p+0
+ comp32(single_of_bits(0xfe800000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p+126 * -0x0p+0 = 0x0p+0
+}
+
+void f458(void) {
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xfe800000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p+126 = 0x0p+0
+ comp32(single_of_bits(0xfe800000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p+126 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xfe800000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p+126 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7e800000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p+126 = -0x0p+0
+ comp32(single_of_bits(0x7e800000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p+126 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.fffffep+126 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7effffff), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffep+126 = 0x0p+0
+ comp32(single_of_bits(0xfeffffff) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.fffffep+126 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xfeffffff), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffep+126 = 0x0p+0
+ comp32(single_of_bits(0xfeffffff) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.fffffep+126 * 0x0p+0 = -0x0p+0
+}
+
+void f459(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xfeffffff), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffep+126 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7effffff), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffep+126 = -0x0p+0
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.fffffep+126 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.fffffep+125 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7e7fffff), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffep+125 = 0x0p+0
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.fffffep+125 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xfe7fffff), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffep+125 = 0x0p+0
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.fffffep+125 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xfe7fffff), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffep+125 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7e7fffff), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffep+125 = -0x0p+0
+}
+
+void f460(void) {
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.fffffep+125 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7f7fffff), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffep+127 = 0x0p+0
+ comp32(single_of_bits(0x7f7fffff) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.fffffep+127 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.fffffep+127 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xff7fffff), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffep+127 = 0x0p+0
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.fffffep+127 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xff7fffff), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffep+127 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7f7fffff), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffep+127 = -0x0p+0
+ comp32(single_of_bits(0x7f7fffff) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.fffffep+127 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-126 = 0x0p+0
+}
+
+void f461(void) {
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-126 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p-126 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-126 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80800000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00800000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x01000000), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1p-125 = 0x0p+0
+ comp32(single_of_bits(0x01000000) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1p-125 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1p-125 * -0x0p+0 = 0x0p+0
+}
+
+void f462(void) {
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x81000000), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1p-125 = 0x0p+0
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1p-125 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x81000000), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x01000000) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1p-125 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x01000000), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00ffffff), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.fffffep-126 = 0x0p+0
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.fffffep-126 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.fffffep-126 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80ffffff), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.fffffep-126 = 0x0p+0
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.fffffep-126 * 0x0p+0 = -0x0p+0
+}
+
+void f463(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80ffffff), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.fffffep-126 = -0x0p+0
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.fffffep-126 * -0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00ffffff), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.fffffep-126 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x00800001), 0x00000000, STR(__LINE__)); // 0x0p+0 * 0x1.000002p-126 = 0x0p+0
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x00000000), 0x00000000, STR(__LINE__)); // 0x1.000002p-126 * 0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x80000000), 0x00000000, STR(__LINE__)); // -0x1.000002p-126 * -0x0p+0 = 0x0p+0
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x80800001), 0x00000000, STR(__LINE__)); // -0x0p+0 * -0x1.000002p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x00000000), 0x80000000, STR(__LINE__)); // -0x1.000002p-126 * 0x0p+0 = -0x0p+0
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x80800001), 0x80000000, STR(__LINE__)); // 0x0p+0 * -0x1.000002p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x80000000), 0x80000000, STR(__LINE__)); // 0x1.000002p-126 * -0x0p+0 = -0x0p+0
+}
+
+void f464(void) {
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x00800001), 0x80000000, STR(__LINE__)); // -0x0p+0 * 0x1.000002p-126 = -0x0p+0
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00000000), 0x7fc00000, STR(__LINE__)); // inf * 0x0p+0 = nan
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7f800000), 0x7fc00000, STR(__LINE__)); // 0x0p+0 * inf = nan
+ comp32(single_of_bits(0x80000000) * single_of_bits(0xff800000), 0x7fc00000, STR(__LINE__)); // -0x0p+0 * -inf = nan
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80000000), 0x7fc00000, STR(__LINE__)); // -inf * -0x0p+0 = nan
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7f800000), 0xffc00000, STR(__LINE__)); // -0x0p+0 * inf = -nan
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x80000000), 0xffc00000, STR(__LINE__)); // inf * -0x0p+0 = -nan
+ comp32(single_of_bits(0x00000000) * single_of_bits(0xff800000), 0xffc00000, STR(__LINE__)); // 0x0p+0 * -inf = -nan
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x00000000), 0xffc00000, STR(__LINE__)); // -inf * 0x0p+0 = -nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x00000000), 0x7fc00000, STR(__LINE__)); // nan * 0x0p+0 = nan
+}
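+// Above, inf * 0 is the invalid operation of IEEE 754 multiplication: the
+// expected result is the quiet NaN 0x7fc00000, with the sign bit apparently
+// taken from the XOR of the operand signs (0xffc00000 when the signs differ).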
+
+void f465(void) {
+ comp32(single_of_bits(0x00000000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x0p+0 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x80000000), 0x7fc00000, STR(__LINE__)); // nan * -0x0p+0 = nan
+ comp32(single_of_bits(0x80000000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x0p+0 * nan = nan
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // inf * inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -inf * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // inf * -inf = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -inf * -inf = inf
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p-149 * inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00000001), 0x7f800000, STR(__LINE__)); // inf * 0x1p-149 = inf
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p-149 * -inf = inf
+}
+
+void f466(void) {
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80000001), 0x7f800000, STR(__LINE__)); // -inf * -0x1p-149 = inf
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p-149 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x80000001), 0xff800000, STR(__LINE__)); // inf * -0x1p-149 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x00000001), 0xff800000, STR(__LINE__)); // -inf * 0x1p-149 = -inf
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1p-149 * -inf = -inf
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p-148 * inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00000002), 0x7f800000, STR(__LINE__)); // inf * 0x1p-148 = inf
+ comp32(single_of_bits(0x80000002) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p-148 * -inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80000002), 0x7f800000, STR(__LINE__)); // -inf * -0x1p-148 = inf
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p-148 * inf = -inf
+}
+
+void f467(void) {
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x80000002), 0xff800000, STR(__LINE__)); // inf * -0x1p-148 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x00000002), 0xff800000, STR(__LINE__)); // -inf * 0x1p-148 = -inf
+ comp32(single_of_bits(0x00000002) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1p-148 * -inf = -inf
+ comp32(single_of_bits(0x00000003) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.8p-148 * inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00000003), 0x7f800000, STR(__LINE__)); // inf * 0x1.8p-148 = inf
+ comp32(single_of_bits(0x80000003) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1.8p-148 * -inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80000003), 0x7f800000, STR(__LINE__)); // -inf * -0x1.8p-148 = inf
+ comp32(single_of_bits(0x80000003) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1.8p-148 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x80000003), 0xff800000, STR(__LINE__)); // inf * -0x1.8p-148 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x00000003), 0xff800000, STR(__LINE__)); // -inf * 0x1.8p-148 = -inf
+}
+
+void f468(void) {
+ comp32(single_of_bits(0x00000003) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1.8p-148 * -inf = -inf
+ comp32(single_of_bits(0x00000004) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p-147 * inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00000004), 0x7f800000, STR(__LINE__)); // inf * 0x1p-147 = inf
+ comp32(single_of_bits(0x80000004) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p-147 * -inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80000004), 0x7f800000, STR(__LINE__)); // -inf * -0x1p-147 = inf
+ comp32(single_of_bits(0x80000004) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p-147 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x80000004), 0xff800000, STR(__LINE__)); // inf * -0x1p-147 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x00000004), 0xff800000, STR(__LINE__)); // -inf * 0x1p-147 = -inf
+ comp32(single_of_bits(0x00000004) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1p-147 * -inf = -inf
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp-127 * inf = inf
+}
+
+void f469(void) {
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x007fffff), 0x7f800000, STR(__LINE__)); // inf * 0x1.fffffcp-127 = inf
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1.fffffcp-127 * -inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x807fffff), 0x7f800000, STR(__LINE__)); // -inf * -0x1.fffffcp-127 = inf
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1.fffffcp-127 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x807fffff), 0xff800000, STR(__LINE__)); // inf * -0x1.fffffcp-127 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x007fffff), 0xff800000, STR(__LINE__)); // -inf * 0x1.fffffcp-127 = -inf
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1.fffffcp-127 * -inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x3f800000), 0x7f800000, STR(__LINE__)); // inf * 0x1p+0 = inf
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p+0 * inf = inf
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p+1 * inf = -inf
+}
+
+void f470(void) {
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xc0000000), 0xff800000, STR(__LINE__)); // inf * -0x1p+1 = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xc0400000), 0xff800000, STR(__LINE__)); // inf * -0x1.8p+1 = -inf
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1.8p+1 * inf = -inf
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p+2 * -inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0xc0800000), 0x7f800000, STR(__LINE__)); // -inf * -0x1p+2 = inf
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.4p+2 * inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x40a00000), 0x7f800000, STR(__LINE__)); // inf * 0x1.4p+2 = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x40c00000), 0xff800000, STR(__LINE__)); // -inf * 0x1.8p+2 = -inf
+ comp32(single_of_bits(0x40c00000) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1.8p+2 * -inf = -inf
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0xff800000), 0xff800000, STR(__LINE__)); // 0x1.cp+2 * -inf = -inf
+}
+
+void f471(void) {
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x40e00000), 0xff800000, STR(__LINE__)); // -inf * 0x1.cp+2 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0xc1000000), 0x7f800000, STR(__LINE__)); // -inf * -0x1p+3 = inf
+ comp32(single_of_bits(0xc1000000) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p+3 * -inf = inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p+127 * inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // inf * 0x1p+127 = inf
+ comp32(single_of_bits(0xfe800000) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p+126 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xfe800000), 0xff800000, STR(__LINE__)); // inf * -0x1p+126 = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // inf * -0x1p+127 = -inf
+ comp32(single_of_bits(0xff000000) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p+127 * inf = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0xfe800000), 0x7f800000, STR(__LINE__)); // -inf * -0x1p+126 = inf
+}
+
+void f472(void) {
+ comp32(single_of_bits(0xfe800000) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p+126 * -inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x7effffff), 0x7f800000, STR(__LINE__)); // inf * 0x1.fffffep+126 = inf
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+126 * inf = inf
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1.fffffep+125 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xfe7fffff), 0xff800000, STR(__LINE__)); // inf * -0x1.fffffep+125 = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // inf * -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 * inf = -inf
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1.fffffep+127 * -inf = inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0xff7fffff), 0x7f800000, STR(__LINE__)); // -inf * -0x1.fffffep+127 = inf
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1p-126 * inf = inf
+}
+
+void f473(void) {
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00800000), 0x7f800000, STR(__LINE__)); // inf * 0x1p-126 = inf
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1p-125 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x81000000), 0xff800000, STR(__LINE__)); // inf * -0x1p-125 = -inf
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80800000), 0x7f800000, STR(__LINE__)); // -inf * -0x1p-126 = inf
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1p-126 * -inf = inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x00ffffff), 0x7f800000, STR(__LINE__)); // inf * 0x1.fffffep-126 = inf
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x7f800000), 0x7f800000, STR(__LINE__)); // 0x1.fffffep-126 * inf = inf
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x7f800000), 0xff800000, STR(__LINE__)); // -0x1.000002p-126 * inf = -inf
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x80800001), 0xff800000, STR(__LINE__)); // inf * -0x1.000002p-126 = -inf
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0xff800000), 0x7f800000, STR(__LINE__)); // -0x1.fffffep-126 * -inf = inf
+}
+
+void f474(void) {
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x80ffffff), 0x7f800000, STR(__LINE__)); // -inf * -0x1.fffffep-126 = inf
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x7f800000), 0x7fc00000, STR(__LINE__)); // nan * inf = nan
+ comp32(single_of_bits(0x7f800000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // inf * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0xff800000), 0x7fc00000, STR(__LINE__)); // nan * -inf = nan
+ comp32(single_of_bits(0xff800000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -inf * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // nan * nan = nan
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1.fffffcp-127 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x007fffff), 0x7fc00000, STR(__LINE__)); // nan * 0x1.fffffcp-127 = nan
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1.fffffcp-127 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x807fffff), 0x7fc00000, STR(__LINE__)); // nan * -0x1.fffffcp-127 = nan
+}
+
+void f475(void) {
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x00000001), 0x7fc00000, STR(__LINE__)); // nan * 0x1p-149 = nan
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1p-149 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x80000001), 0x7fc00000, STR(__LINE__)); // nan * -0x1p-149 = nan
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1p-149 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x3f800000), 0x7fc00000, STR(__LINE__)); // nan * 0x1p+0 = nan
+ comp32(single_of_bits(0x3f800000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1p+0 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0xbf800000), 0x7fc00000, STR(__LINE__)); // nan * -0x1p+0 = nan
+ comp32(single_of_bits(0xbf800000) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1p+0 * nan = nan
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0x7f7fffff), 0x7fc00000, STR(__LINE__)); // nan * 0x1.fffffep+127 = nan
+ comp32(single_of_bits(0x7f7fffff) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // 0x1.fffffep+127 * nan = nan
+}
+
+void f476(void) {
+ comp32(single_of_bits(0x7fc00000) * single_of_bits(0xff7fffff), 0x7fc00000, STR(__LINE__)); // nan * -0x1.fffffep+127 = nan
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0x7fc00000), 0x7fc00000, STR(__LINE__)); // -0x1.fffffep+127 * nan = nan
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0x40000000), 0x7f800000, STR(__LINE__)); // 0x1p+127 * 0x1p+1 = inf
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // 0x1p+1 * 0x1p+127 = inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // 0x1p+127 * 0x1p+127 = inf
+ comp32(single_of_bits(0x7f000009) * single_of_bits(0x7f7ffffa), 0x7f800000, STR(__LINE__)); // 0x1.000012p+127 * 0x1.fffff4p+127 = inf
+ comp32(single_of_bits(0x7f7ffffa) * single_of_bits(0x7f000009), 0x7f800000, STR(__LINE__)); // 0x1.fffff4p+127 * 0x1.000012p+127 = inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0x7f7ffffe), 0x7f800000, STR(__LINE__)); // 0x1p+127 * 0x1.fffffcp+127 = inf
+ comp32(single_of_bits(0x7f7ffffe) * single_of_bits(0x7f000000), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp+127 * 0x1p+127 = inf
+ comp32(single_of_bits(0xc09ffffe) * single_of_bits(0xff000000), 0x7f800000, STR(__LINE__)); // -0x1.3ffffcp+2 * -0x1p+127 = inf
+}
+
+void f477(void) {
+ comp32(single_of_bits(0xff000000) * single_of_bits(0xc09ffffe), 0x7f800000, STR(__LINE__)); // -0x1p+127 * -0x1.3ffffcp+2 = inf
+ comp32(single_of_bits(0xc1100001) * single_of_bits(0xff000000), 0x7f800000, STR(__LINE__)); // -0x1.200002p+3 * -0x1p+127 = inf
+ comp32(single_of_bits(0xff000000) * single_of_bits(0xc1100001), 0x7f800000, STR(__LINE__)); // -0x1p+127 * -0x1.200002p+3 = inf
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0xff000000), 0x7f800000, STR(__LINE__)); // -0x1.8p+1 * -0x1p+127 = inf
+ comp32(single_of_bits(0xff000000) * single_of_bits(0xc0400000), 0x7f800000, STR(__LINE__)); // -0x1p+127 * -0x1.8p+1 = inf
+ comp32(single_of_bits(0xff000005) * single_of_bits(0xff000001), 0x7f800000, STR(__LINE__)); // -0x1.00000ap+127 * -0x1.000002p+127 = inf
+ comp32(single_of_bits(0xff000001) * single_of_bits(0xff000005), 0x7f800000, STR(__LINE__)); // -0x1.000002p+127 * -0x1.00000ap+127 = inf
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0xff7fffff), 0x7f800000, STR(__LINE__)); // -0x1.fffffep+127 * -0x1.fffffep+127 = inf
+ comp32(single_of_bits(0xff7ffffd) * single_of_bits(0xff000001), 0x7f800000, STR(__LINE__)); // -0x1.fffffap+127 * -0x1.000002p+127 = inf
+ comp32(single_of_bits(0xff000001) * single_of_bits(0xff7ffffd), 0x7f800000, STR(__LINE__)); // -0x1.000002p+127 * -0x1.fffffap+127 = inf
+}
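+// These large-operand products overflow: under round-to-nearest, any product
+// whose rounded magnitude would exceed the largest finite single,
+// 0x1.fffffep+127, becomes +inf (0x7f800000) or -inf (0xff800000) according
+// to the sign rule.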
+
+void f478(void) {
+ comp32(single_of_bits(0xff7ffffd) * single_of_bits(0xc0400001), 0x7f800000, STR(__LINE__)); // -0x1.fffffap+127 * -0x1.800002p+1 = inf
+ comp32(single_of_bits(0xc0400001) * single_of_bits(0xff7ffffd), 0x7f800000, STR(__LINE__)); // -0x1.800002p+1 * -0x1.fffffap+127 = inf
+ comp32(single_of_bits(0xc03ffffe) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1.7ffffcp+1 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xc03ffffe), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1.7ffffcp+1 = -inf
+ comp32(single_of_bits(0xc0dffff9) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1.bffff2p+2 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xc0dffff9), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1.bffff2p+2 = -inf
+ comp32(single_of_bits(0xc1100000) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1.2p+3 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xc1100000), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1.2p+3 = -inf
+ comp32(single_of_bits(0xff7ffffd) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1.fffffap+127 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xff7ffffd), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1.fffffap+127 = -inf
+}
+
+void f479(void) {
+ comp32(single_of_bits(0xfe7ffff9) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1.fffff2p+125 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xfe7ffff9), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1.fffff2p+125 = -inf
+ comp32(single_of_bits(0xfefffff7) * single_of_bits(0x7e800001), 0xff800000, STR(__LINE__)); // -0x1.ffffeep+126 * 0x1.000002p+126 = -inf
+ comp32(single_of_bits(0x7e800001) * single_of_bits(0xfefffff7), 0xff800000, STR(__LINE__)); // 0x1.000002p+126 * -0x1.ffffeep+126 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xfe800004), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1.000008p+126 = -inf
+ comp32(single_of_bits(0xfe800004) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1.000008p+126 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xfe800000), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1p+126 = -inf
+ comp32(single_of_bits(0xfe800000) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1p+126 * 0x1p+127 = -inf
+ comp32(single_of_bits(0x7f000000) * single_of_bits(0xff000000), 0xff800000, STR(__LINE__)); // 0x1p+127 * -0x1p+127 = -inf
+ comp32(single_of_bits(0xff000000) * single_of_bits(0x7f000000), 0xff800000, STR(__LINE__)); // -0x1p+127 * 0x1p+127 = -inf
+}
+
+void f480(void) {
+ comp32(single_of_bits(0x7f000009) * single_of_bits(0xc0c00002), 0xff800000, STR(__LINE__)); // 0x1.000012p+127 * -0x1.800004p+2 = -inf
+ comp32(single_of_bits(0xc0c00002) * single_of_bits(0x7f000009), 0xff800000, STR(__LINE__)); // -0x1.800004p+2 * 0x1.000012p+127 = -inf
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x7f7fffff), 0x7f800000, STR(__LINE__)); // 0x1.000002p+0 * 0x1.fffffep+127 = inf
+ comp32(single_of_bits(0x7f7fffff) * single_of_bits(0x3f800001), 0x7f800000, STR(__LINE__)); // 0x1.fffffep+127 * 0x1.000002p+0 = inf
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xff7fffff), 0x7f800000, STR(__LINE__)); // -0x1.000002p+0 * -0x1.fffffep+127 = inf
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0xbf800001), 0x7f800000, STR(__LINE__)); // -0x1.fffffep+127 * -0x1.000002p+0 = inf
+ comp32(single_of_bits(0x3f800002) * single_of_bits(0x7f7ffffe), 0x7f800000, STR(__LINE__)); // 0x1.000004p+0 * 0x1.fffffcp+127 = inf
+ comp32(single_of_bits(0x7f7ffffe) * single_of_bits(0x3f800002), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp+127 * 0x1.000004p+0 = inf
+ comp32(single_of_bits(0xbf800002) * single_of_bits(0xff7ffffe), 0x7f800000, STR(__LINE__)); // -0x1.000004p+0 * -0x1.fffffcp+127 = inf
+ comp32(single_of_bits(0xff7ffffe) * single_of_bits(0xbf800002), 0x7f800000, STR(__LINE__)); // -0x1.fffffcp+127 * -0x1.000004p+0 = inf
+}
+
+void f481(void) {
+ comp32(single_of_bits(0x3f800004) * single_of_bits(0x7f7ffffc), 0x7f800000, STR(__LINE__)); // 0x1.000008p+0 * 0x1.fffff8p+127 = inf
+ comp32(single_of_bits(0x7f7ffffc) * single_of_bits(0x3f800004), 0x7f800000, STR(__LINE__)); // 0x1.fffff8p+127 * 0x1.000008p+0 = inf
+ comp32(single_of_bits(0xbf800004) * single_of_bits(0xff7ffffc), 0x7f800000, STR(__LINE__)); // -0x1.000008p+0 * -0x1.fffff8p+127 = inf
+ comp32(single_of_bits(0xff7ffffc) * single_of_bits(0xbf800004), 0x7f800000, STR(__LINE__)); // -0x1.fffff8p+127 * -0x1.000008p+0 = inf
+ comp32(single_of_bits(0x3f800008) * single_of_bits(0x7f7ffff8), 0x7f800000, STR(__LINE__)); // 0x1.00001p+0 * 0x1.fffffp+127 = inf
+ comp32(single_of_bits(0x7f7ffff8) * single_of_bits(0x3f800008), 0x7f800000, STR(__LINE__)); // 0x1.fffffp+127 * 0x1.00001p+0 = inf
+ comp32(single_of_bits(0xbf800008) * single_of_bits(0xff7ffff8), 0x7f800000, STR(__LINE__)); // -0x1.00001p+0 * -0x1.fffffp+127 = inf
+ comp32(single_of_bits(0xff7ffff8) * single_of_bits(0xbf800008), 0x7f800000, STR(__LINE__)); // -0x1.fffffp+127 * -0x1.00001p+0 = inf
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x7f7fffff), 0xff800000, STR(__LINE__)); // -0x1.000002p+0 * 0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0x7f7fffff) * single_of_bits(0xbf800001), 0xff800000, STR(__LINE__)); // 0x1.fffffep+127 * -0x1.000002p+0 = -inf
+}
+
+void f482(void) {
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0x3f800001), 0xff800000, STR(__LINE__)); // -0x1.fffffep+127 * 0x1.000002p+0 = -inf
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xff7fffff), 0xff800000, STR(__LINE__)); // 0x1.000002p+0 * -0x1.fffffep+127 = -inf
+ comp32(single_of_bits(0xbf800002) * single_of_bits(0x7f7ffffe), 0xff800000, STR(__LINE__)); // -0x1.000004p+0 * 0x1.fffffcp+127 = -inf
+ comp32(single_of_bits(0x7f7ffffe) * single_of_bits(0xbf800002), 0xff800000, STR(__LINE__)); // 0x1.fffffcp+127 * -0x1.000004p+0 = -inf
+ comp32(single_of_bits(0xff7ffffe) * single_of_bits(0x3f800002), 0xff800000, STR(__LINE__)); // -0x1.fffffcp+127 * 0x1.000004p+0 = -inf
+ comp32(single_of_bits(0x3f800002) * single_of_bits(0xff7ffffe), 0xff800000, STR(__LINE__)); // 0x1.000004p+0 * -0x1.fffffcp+127 = -inf
+ comp32(single_of_bits(0xbf800004) * single_of_bits(0x7f7ffffc), 0xff800000, STR(__LINE__)); // -0x1.000008p+0 * 0x1.fffff8p+127 = -inf
+ comp32(single_of_bits(0x7f7ffffc) * single_of_bits(0xbf800004), 0xff800000, STR(__LINE__)); // 0x1.fffff8p+127 * -0x1.000008p+0 = -inf
+ comp32(single_of_bits(0xff7ffffc) * single_of_bits(0x3f800004), 0xff800000, STR(__LINE__)); // -0x1.fffff8p+127 * 0x1.000008p+0 = -inf
+ comp32(single_of_bits(0x3f800004) * single_of_bits(0xff7ffffc), 0xff800000, STR(__LINE__)); // 0x1.000008p+0 * -0x1.fffff8p+127 = -inf
+}
+
+void f483(void) {
+ comp32(single_of_bits(0xbf800008) * single_of_bits(0x7f7ffff8), 0xff800000, STR(__LINE__)); // -0x1.00001p+0 * 0x1.fffffp+127 = -inf
+ comp32(single_of_bits(0x7f7ffff8) * single_of_bits(0xbf800008), 0xff800000, STR(__LINE__)); // 0x1.fffffp+127 * -0x1.00001p+0 = -inf
+ comp32(single_of_bits(0xff7ffff8) * single_of_bits(0x3f800008), 0xff800000, STR(__LINE__)); // -0x1.fffffp+127 * 0x1.00001p+0 = -inf
+ comp32(single_of_bits(0x3f800008) * single_of_bits(0xff7ffff8), 0xff800000, STR(__LINE__)); // 0x1.00001p+0 * -0x1.fffffp+127 = -inf
+ comp32(single_of_bits(0x7efffffd) * single_of_bits(0xc0000008), 0xff800000, STR(__LINE__)); // 0x1.fffffap+126 * -0x1.00001p+1 = -inf
+ comp32(single_of_bits(0xc0000008) * single_of_bits(0x7efffffd), 0xff800000, STR(__LINE__)); // -0x1.00001p+1 * 0x1.fffffap+126 = -inf
+ comp32(single_of_bits(0x3ffffffc) * single_of_bits(0x7f000002), 0x7f800000, STR(__LINE__)); // 0x1.fffff8p+0 * 0x1.000004p+127 = inf
+ comp32(single_of_bits(0x7f000002) * single_of_bits(0x3ffffffc), 0x7f800000, STR(__LINE__)); // 0x1.000004p+127 * 0x1.fffff8p+0 = inf
+ comp32(single_of_bits(0xbffffffc) * single_of_bits(0xff000002), 0x7f800000, STR(__LINE__)); // -0x1.fffff8p+0 * -0x1.000004p+127 = inf
+ comp32(single_of_bits(0xff000002) * single_of_bits(0xbffffffc), 0x7f800000, STR(__LINE__)); // -0x1.000004p+127 * -0x1.fffff8p+0 = inf
+}
+
+void f484(void) {
+ comp32(single_of_bits(0xbffffffc) * single_of_bits(0x7f000002), 0xff800000, STR(__LINE__)); // -0x1.fffff8p+0 * 0x1.000004p+127 = -inf
+ comp32(single_of_bits(0x7f000002) * single_of_bits(0xbffffffc), 0xff800000, STR(__LINE__)); // 0x1.000004p+127 * -0x1.fffff8p+0 = -inf
+ comp32(single_of_bits(0xff000002) * single_of_bits(0x3ffffffc), 0xff800000, STR(__LINE__)); // -0x1.000004p+127 * 0x1.fffff8p+0 = -inf
+ comp32(single_of_bits(0x3ffffffc) * single_of_bits(0xff000002), 0xff800000, STR(__LINE__)); // 0x1.fffff8p+0 * -0x1.000004p+127 = -inf
+ comp32(single_of_bits(0x3ffffffe) * single_of_bits(0x7f000001), 0x7f800000, STR(__LINE__)); // 0x1.fffffcp+0 * 0x1.000002p+127 = inf
+ comp32(single_of_bits(0x7f000001) * single_of_bits(0x3ffffffe), 0x7f800000, STR(__LINE__)); // 0x1.000002p+127 * 0x1.fffffcp+0 = inf
+ comp32(single_of_bits(0xbffffffe) * single_of_bits(0xff000001), 0x7f800000, STR(__LINE__)); // -0x1.fffffcp+0 * -0x1.000002p+127 = inf
+ comp32(single_of_bits(0xff000001) * single_of_bits(0xbffffffe), 0x7f800000, STR(__LINE__)); // -0x1.000002p+127 * -0x1.fffffcp+0 = inf
+ comp32(single_of_bits(0xbffffffe) * single_of_bits(0x7f000001), 0xff800000, STR(__LINE__)); // -0x1.fffffcp+0 * 0x1.000002p+127 = -inf
+ comp32(single_of_bits(0x7f000001) * single_of_bits(0xbffffffe), 0xff800000, STR(__LINE__)); // 0x1.000002p+127 * -0x1.fffffcp+0 = -inf
+}
+
+void f485(void) {
+ comp32(single_of_bits(0xff000001) * single_of_bits(0x3ffffffe), 0xff800000, STR(__LINE__)); // -0x1.000002p+127 * 0x1.fffffcp+0 = -inf
+ comp32(single_of_bits(0x3ffffffe) * single_of_bits(0xff000001), 0xff800000, STR(__LINE__)); // 0x1.fffffcp+0 * -0x1.000002p+127 = -inf
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x1p-126 * 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x1p-126 * -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x00800000), 0x80000000, STR(__LINE__)); // -0x1p-126 * 0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x80800000), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x01000000), 0x00000000, STR(__LINE__)); // 0x1p-126 * 0x1p-125 = 0x0p+0
+ comp32(single_of_bits(0x01000000) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x1p-125 * 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x81000000), 0x00000000, STR(__LINE__)); // -0x1p-126 * -0x1p-125 = 0x0p+0
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x1p-125 * -0x1p-126 = 0x0p+0
+}
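+// The products above underflow completely: e.g. 0x1p-126 * 0x1p-126 = 0x1p-252,
+// far below half of the smallest subnormal (0x1p-149), so round-to-nearest
+// delivers a zero whose sign again follows the XOR of the operand signs.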
+
+void f486(void) {
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x01000000), 0x80000000, STR(__LINE__)); // -0x1p-126 * 0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x01000000) * single_of_bits(0x80800000), 0x80000000, STR(__LINE__)); // 0x1p-125 * -0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x81000000), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x00800000), 0x80000000, STR(__LINE__)); // -0x1p-125 * 0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x01000000) * single_of_bits(0x01000000), 0x00000000, STR(__LINE__)); // 0x1p-125 * 0x1p-125 = 0x0p+0
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x81000000), 0x00000000, STR(__LINE__)); // -0x1p-125 * -0x1p-125 = 0x0p+0
+ comp32(single_of_bits(0x81000000) * single_of_bits(0x01000000), 0x80000000, STR(__LINE__)); // -0x1p-125 * 0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x01000000) * single_of_bits(0x81000000), 0x80000000, STR(__LINE__)); // 0x1p-125 * -0x1p-125 = -0x0p+0
+ comp32(single_of_bits(0x32800000) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x1p-26 * 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x32800000), 0x00000000, STR(__LINE__)); // 0x1p-126 * 0x1p-26 = 0x0p+0
+}
+
+void f487(void) {
+ comp32(single_of_bits(0xb2800000) * single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x1p-26 * -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xb2800000), 0x00000000, STR(__LINE__)); // -0x1p-126 * -0x1p-26 = 0x0p+0
+ comp32(single_of_bits(0xb2800000) * single_of_bits(0x00800000), 0x80000000, STR(__LINE__)); // -0x1p-26 * 0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb2800000), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x1p-26 = -0x0p+0
+ comp32(single_of_bits(0x32800000) * single_of_bits(0x80800000), 0x80000000, STR(__LINE__)); // 0x1p-26 * -0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x32800000), 0x80000000, STR(__LINE__)); // -0x1p-126 * 0x1p-26 = -0x0p+0
+ comp32(single_of_bits(0x807ffff7) * single_of_bits(0x01000003), 0x80000000, STR(__LINE__)); // -0x1.ffffdcp-127 * 0x1.000006p-125 = -0x0p+0
+ comp32(single_of_bits(0x01000003) * single_of_bits(0x807ffff7), 0x80000000, STR(__LINE__)); // 0x1.000006p-125 * -0x1.ffffdcp-127 = -0x0p+0
+ comp32(single_of_bits(0x007ffff7) * single_of_bits(0x81000003), 0x80000000, STR(__LINE__)); // 0x1.ffffdcp-127 * -0x1.000006p-125 = -0x0p+0
+ comp32(single_of_bits(0x81000003) * single_of_bits(0x007ffff7), 0x80000000, STR(__LINE__)); // -0x1.000006p-125 * 0x1.ffffdcp-127 = -0x0p+0
+}
+
+void f488(void) {
+ comp32(single_of_bits(0x007ffff7) * single_of_bits(0x01000003), 0x00000000, STR(__LINE__)); // 0x1.ffffdcp-127 * 0x1.000006p-125 = 0x0p+0
+ comp32(single_of_bits(0x01000003) * single_of_bits(0x007ffff7), 0x00000000, STR(__LINE__)); // 0x1.000006p-125 * 0x1.ffffdcp-127 = 0x0p+0
+ comp32(single_of_bits(0x807ffff7) * single_of_bits(0x81000003), 0x00000000, STR(__LINE__)); // -0x1.ffffdcp-127 * -0x1.000006p-125 = 0x0p+0
+ comp32(single_of_bits(0x81000003) * single_of_bits(0x807ffff7), 0x00000000, STR(__LINE__)); // -0x1.000006p-125 * -0x1.ffffdcp-127 = 0x0p+0
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x007ffffe), 0x00000000, STR(__LINE__)); // 0x1.fffffcp-127 * 0x1.fffff8p-127 = 0x0p+0
+ comp32(single_of_bits(0x007ffffe) * single_of_bits(0x007fffff), 0x00000000, STR(__LINE__)); // 0x1.fffff8p-127 * 0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x807ffffe), 0x00000000, STR(__LINE__)); // -0x1.fffffcp-127 * -0x1.fffff8p-127 = 0x0p+0
+ comp32(single_of_bits(0x807ffffe) * single_of_bits(0x807fffff), 0x00000000, STR(__LINE__)); // -0x1.fffff8p-127 * -0x1.fffffcp-127 = 0x0p+0
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x007ffffe), 0x80000000, STR(__LINE__)); // -0x1.fffffcp-127 * 0x1.fffff8p-127 = -0x0p+0
+ comp32(single_of_bits(0x007ffffe) * single_of_bits(0x807fffff), 0x80000000, STR(__LINE__)); // 0x1.fffff8p-127 * -0x1.fffffcp-127 = -0x0p+0
+}
+
+void f489(void) {
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x807ffffe), 0x80000000, STR(__LINE__)); // 0x1.fffffcp-127 * -0x1.fffff8p-127 = -0x0p+0
+ comp32(single_of_bits(0x807ffffe) * single_of_bits(0x007fffff), 0x80000000, STR(__LINE__)); // -0x1.fffff8p-127 * 0x1.fffffcp-127 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3e000000), 0x00000000, STR(__LINE__)); // 0x1p-149 * 0x1p-3 = 0x0p+0
+ comp32(single_of_bits(0x3e000000) * single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // 0x1p-3 * 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbe000000), 0x00000000, STR(__LINE__)); // -0x1p-149 * -0x1p-3 = 0x0p+0
+ comp32(single_of_bits(0xbe000000) * single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // -0x1p-3 * -0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3e000000), 0x80000000, STR(__LINE__)); // -0x1p-149 * 0x1p-3 = -0x0p+0
+ comp32(single_of_bits(0x3e000000) * single_of_bits(0x80000001), 0x80000000, STR(__LINE__)); // 0x1p-3 * -0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbe000000), 0x80000000, STR(__LINE__)); // 0x1p-149 * -0x1p-3 = -0x0p+0
+ comp32(single_of_bits(0xbe000000) * single_of_bits(0x00000001), 0x80000000, STR(__LINE__)); // -0x1p-3 * 0x1p-149 = -0x0p+0
+}
+
+void f490(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // 0x1p-149 * 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x00000001), 0x80000000, STR(__LINE__)); // -0x1p-149 * 0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x80000001), 0x80000000, STR(__LINE__)); // 0x1p-149 * -0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // -0x1p-149 * -0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x32ffffff) * single_of_bits(0x00800001), 0x00000000, STR(__LINE__)); // 0x1.fffffep-26 * 0x1.000002p-126 = 0x0p+0
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x32ffffff), 0x00000000, STR(__LINE__)); // 0x1.000002p-126 * 0x1.fffffep-26 = 0x0p+0
+ comp32(single_of_bits(0xb2ffffff) * single_of_bits(0x80800001), 0x00000000, STR(__LINE__)); // -0x1.fffffep-26 * -0x1.000002p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xb2ffffff), 0x00000000, STR(__LINE__)); // -0x1.000002p-126 * -0x1.fffffep-26 = 0x0p+0
+ comp32(single_of_bits(0xb2ffffff) * single_of_bits(0x00800001), 0x80000000, STR(__LINE__)); // -0x1.fffffep-26 * 0x1.000002p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xb2ffffff), 0x80000000, STR(__LINE__)); // 0x1.000002p-126 * -0x1.fffffep-26 = -0x0p+0
+}
+
+void f491(void) {
+ comp32(single_of_bits(0x32ffffff) * single_of_bits(0x80800001), 0x80000000, STR(__LINE__)); // 0x1.fffffep-26 * -0x1.000002p-126 = -0x0p+0
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x32ffffff), 0x80000000, STR(__LINE__)); // -0x1.000002p-126 * 0x1.fffffep-26 = -0x0p+0
+ comp32(single_of_bits(0x337fffff) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x1.fffffep-25 * 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x337fffff), 0x00000000, STR(__LINE__)); // 0x1p-126 * 0x1.fffffep-25 = 0x0p+0
+ comp32(single_of_bits(0xb37fffff) * single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x1.fffffep-25 * -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xb37fffff), 0x00000000, STR(__LINE__)); // -0x1p-126 * -0x1.fffffep-25 = 0x0p+0
+ comp32(single_of_bits(0xb37fffff) * single_of_bits(0x00800000), 0x80000000, STR(__LINE__)); // -0x1.fffffep-25 * 0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb37fffff), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x1.fffffep-25 = -0x0p+0
+ comp32(single_of_bits(0x337fffff) * single_of_bits(0x80800000), 0x80000000, STR(__LINE__)); // 0x1.fffffep-25 * -0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x337fffff), 0x80000000, STR(__LINE__)); // -0x1p-126 * 0x1.fffffep-25 = -0x0p+0
+}
+
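+/* The zero results above come from underflow: whenever the magnitude of the
+   exact product is strictly below 0x1p-150, i.e. below half of the smallest
+   subnormal 0x1p-149, round-to-nearest returns zero, and the zero keeps the
+   sign of the exact product (the XOR of the operand signs).  The helper below
+   is an illustrative restatement of one case, assuming single_of_bits
+   reinterprets a 32-bit word as a float and comp32 compares the bit pattern
+   of its float argument with the expected word, as their use throughout this
+   file suggests; it is not part of the generated f### sequence. */
+void note_underflow_to_signed_zero(void) {
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb2800000), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x1p-26 = -0x1p-152 exactly, below 0x1p-150 in magnitude, so it rounds to -0x0p+0
+}
+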
+void f492(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3e800000), 0x00000000, STR(__LINE__)); // 0x1p-149 * 0x1p-2 = 0x0p+0
+ comp32(single_of_bits(0x3e800000) * single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // 0x1p-2 * 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbe800000), 0x00000000, STR(__LINE__)); // -0x1p-149 * -0x1p-2 = 0x0p+0
+ comp32(single_of_bits(0xbe800000) * single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // -0x1p-2 * -0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3e800000), 0x80000000, STR(__LINE__)); // -0x1p-149 * 0x1p-2 = -0x0p+0
+ comp32(single_of_bits(0x3e800000) * single_of_bits(0x80000001), 0x80000000, STR(__LINE__)); // 0x1p-2 * -0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0xbe800000) * single_of_bits(0x00000001), 0x80000000, STR(__LINE__)); // -0x1p-2 * 0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbe800000), 0x80000000, STR(__LINE__)); // 0x1p-149 * -0x1p-2 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3fbfffff), 0x00000001, STR(__LINE__)); // 0x1p-149 * 0x1.7ffffep+0 = 0x1p-149
+ comp32(single_of_bits(0x3fbfffff) * single_of_bits(0x00000001), 0x00000001, STR(__LINE__)); // 0x1.7ffffep+0 * 0x1p-149 = 0x1p-149
+}
+
+void f493(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbfbfffff), 0x00000001, STR(__LINE__)); // -0x1p-149 * -0x1.7ffffep+0 = 0x1p-149
+ comp32(single_of_bits(0xbfbfffff) * single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // -0x1.7ffffep+0 * -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3fbfffff), 0x80000001, STR(__LINE__)); // -0x1p-149 * 0x1.7ffffep+0 = -0x1p-149
+ comp32(single_of_bits(0x3fbfffff) * single_of_bits(0x80000001), 0x80000001, STR(__LINE__)); // 0x1.7ffffep+0 * -0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0xbfbfffff) * single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1.7ffffep+0 * 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbfbfffff), 0x80000001, STR(__LINE__)); // 0x1p-149 * -0x1.7ffffep+0 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x405fffff), 0x00000003, STR(__LINE__)); // 0x1p-149 * 0x1.bffffep+1 = 0x1.8p-148
+ comp32(single_of_bits(0x405fffff) * single_of_bits(0x00000001), 0x00000003, STR(__LINE__)); // 0x1.bffffep+1 * 0x1p-149 = 0x1.8p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc05fffff), 0x00000003, STR(__LINE__)); // -0x1p-149 * -0x1.bffffep+1 = 0x1.8p-148
+ comp32(single_of_bits(0xc05fffff) * single_of_bits(0x80000001), 0x00000003, STR(__LINE__)); // -0x1.bffffep+1 * -0x1p-149 = 0x1.8p-148
+}
+
+void f494(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x405fffff), 0x80000003, STR(__LINE__)); // -0x1p-149 * 0x1.bffffep+1 = -0x1.8p-148
+ comp32(single_of_bits(0x405fffff) * single_of_bits(0x80000001), 0x80000003, STR(__LINE__)); // 0x1.bffffep+1 * -0x1p-149 = -0x1.8p-148
+ comp32(single_of_bits(0xc05fffff) * single_of_bits(0x00000001), 0x80000003, STR(__LINE__)); // -0x1.bffffep+1 * 0x1p-149 = -0x1.8p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc05fffff), 0x80000003, STR(__LINE__)); // 0x1p-149 * -0x1.bffffep+1 = -0x1.8p-148
+ comp32(single_of_bits(0x33800000) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // 0x1p-24 * 0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x33800000), 0x00000000, STR(__LINE__)); // 0x1p-126 * 0x1p-24 = 0x0p+0
+ comp32(single_of_bits(0xb3800000) * single_of_bits(0x80800000), 0x00000000, STR(__LINE__)); // -0x1p-24 * -0x1p-126 = 0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xb3800000), 0x00000000, STR(__LINE__)); // -0x1p-126 * -0x1p-24 = 0x0p+0
+ comp32(single_of_bits(0xb3800000) * single_of_bits(0x00800000), 0x80000000, STR(__LINE__)); // -0x1p-24 * 0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb3800000), 0x80000000, STR(__LINE__)); // 0x1p-126 * -0x1p-24 = -0x0p+0
+}
+
+void f495(void) {
+ comp32(single_of_bits(0x33800000) * single_of_bits(0x80800000), 0x80000000, STR(__LINE__)); // 0x1p-24 * -0x1p-126 = -0x0p+0
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x33800000), 0x80000000, STR(__LINE__)); // -0x1p-126 * 0x1p-24 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3f000000), 0x00000000, STR(__LINE__)); // 0x1p-149 * 0x1p-1 = 0x0p+0
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00000001), 0x00000000, STR(__LINE__)); // 0x1p-1 * 0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbf000000), 0x00000000, STR(__LINE__)); // -0x1p-149 * -0x1p-1 = 0x0p+0
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80000001), 0x00000000, STR(__LINE__)); // -0x1p-1 * -0x1p-149 = 0x0p+0
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80000001), 0x80000000, STR(__LINE__)); // 0x1p-1 * -0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3f000000), 0x80000000, STR(__LINE__)); // -0x1p-149 * 0x1p-1 = -0x0p+0
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00000001), 0x80000000, STR(__LINE__)); // -0x1p-1 * 0x1p-149 = -0x0p+0
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbf000000), 0x80000000, STR(__LINE__)); // 0x1p-149 * -0x1p-1 = -0x0p+0
+}
+
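+/* The f494/f495 cases sit exactly on the boundary: 0x1p-24 * 0x1p-126 is
+   0x1p-150, halfway between zero and the smallest subnormal 0x1p-149.  Round
+   to nearest, ties to even, picks the candidate whose significand is even,
+   which is zero, and the zero again carries the sign of the exact product.
+   Illustrative restatement (same assumptions about single_of_bits/comp32 as
+   in note_underflow_to_signed_zero above): */
+void note_tie_at_the_zero_boundary(void) {
+ comp32(single_of_bits(0x33800000) * single_of_bits(0x00800000), 0x00000000, STR(__LINE__)); // exact 0x1p-150 ties between 0x0p+0 (even) and 0x1p-149 (odd); the even candidate wins
+}
+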
+void f496(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3fc00000), 0x00000002, STR(__LINE__)); // 0x1p-149 * 0x1.8p+0 = 0x1p-148
+ comp32(single_of_bits(0x3fc00000) * single_of_bits(0x00000001), 0x00000002, STR(__LINE__)); // 0x1.8p+0 * 0x1p-149 = 0x1p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbfc00000), 0x00000002, STR(__LINE__)); // -0x1p-149 * -0x1.8p+0 = 0x1p-148
+ comp32(single_of_bits(0xbfc00000) * single_of_bits(0x80000001), 0x00000002, STR(__LINE__)); // -0x1.8p+0 * -0x1p-149 = 0x1p-148
+ comp32(single_of_bits(0x3fc00000) * single_of_bits(0x80000001), 0x80000002, STR(__LINE__)); // 0x1.8p+0 * -0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3fc00000), 0x80000002, STR(__LINE__)); // -0x1p-149 * 0x1.8p+0 = -0x1p-148
+ comp32(single_of_bits(0xbfc00000) * single_of_bits(0x00000001), 0x80000002, STR(__LINE__)); // -0x1.8p+0 * 0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbfc00000), 0x80000002, STR(__LINE__)); // 0x1p-149 * -0x1.8p+0 = -0x1p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40200000), 0x00000002, STR(__LINE__)); // 0x1p-149 * 0x1.4p+1 = 0x1p-148
+ comp32(single_of_bits(0x40200000) * single_of_bits(0x00000001), 0x00000002, STR(__LINE__)); // 0x1.4p+1 * 0x1p-149 = 0x1p-148
+}
+
+void f497(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0200000), 0x00000002, STR(__LINE__)); // -0x1p-149 * -0x1.4p+1 = 0x1p-148
+ comp32(single_of_bits(0xc0200000) * single_of_bits(0x80000001), 0x00000002, STR(__LINE__)); // -0x1.4p+1 * -0x1p-149 = 0x1p-148
+ comp32(single_of_bits(0x40200000) * single_of_bits(0x80000001), 0x80000002, STR(__LINE__)); // 0x1.4p+1 * -0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40200000), 0x80000002, STR(__LINE__)); // -0x1p-149 * 0x1.4p+1 = -0x1p-148
+ comp32(single_of_bits(0xc0200000) * single_of_bits(0x00000001), 0x80000002, STR(__LINE__)); // -0x1.4p+1 * 0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0200000), 0x80000002, STR(__LINE__)); // 0x1p-149 * -0x1.4p+1 = -0x1p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40600000), 0x00000004, STR(__LINE__)); // 0x1p-149 * 0x1.cp+1 = 0x1p-147
+ comp32(single_of_bits(0x40600000) * single_of_bits(0x00000001), 0x00000004, STR(__LINE__)); // 0x1.cp+1 * 0x1p-149 = 0x1p-147
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0600000), 0x00000004, STR(__LINE__)); // -0x1p-149 * -0x1.cp+1 = 0x1p-147
+ comp32(single_of_bits(0xc0600000) * single_of_bits(0x80000001), 0x00000004, STR(__LINE__)); // -0x1.cp+1 * -0x1p-149 = 0x1p-147
+}
+
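+/* f496/f497 exercise ties between adjacent subnormals: the exact products
+   0x1p-149 * 0x1.8p+0, 0x1p-149 * 0x1.4p+1 and 0x1p-149 * 0x1.cp+1 are 1.5,
+   2.5 and 3.5 units of 0x1p-149, so ties-to-even rounds them to 2, 2 and 4
+   units (0x00000002, 0x00000002, 0x00000004).  Illustrative restatement
+   (same single_of_bits/comp32 assumptions as above): */
+void note_tie_between_adjacent_subnormals(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3fc00000), 0x00000002, STR(__LINE__)); // exact 1.5 * 0x1p-149 ties between 1 (odd) and 2 (even) units; 2 wins
+}
+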
+void f498(void) {
+ comp32(single_of_bits(0x40600000) * single_of_bits(0x80000001), 0x80000004, STR(__LINE__)); // 0x1.cp+1 * -0x1p-149 = -0x1p-147
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40600000), 0x80000004, STR(__LINE__)); // -0x1p-149 * 0x1.cp+1 = -0x1p-147
+ comp32(single_of_bits(0xc0600000) * single_of_bits(0x00000001), 0x80000004, STR(__LINE__)); // -0x1.cp+1 * 0x1p-149 = -0x1p-147
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0600000), 0x80000004, STR(__LINE__)); // 0x1p-149 * -0x1.cp+1 = -0x1p-147
+ comp32(single_of_bits(0x33a00000) * single_of_bits(0x00800000), 0x00000001, STR(__LINE__)); // 0x1.4p-24 * 0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x33a00000), 0x00000001, STR(__LINE__)); // 0x1p-126 * 0x1.4p-24 = 0x1p-149
+ comp32(single_of_bits(0xb3a00000) * single_of_bits(0x80800000), 0x00000001, STR(__LINE__)); // -0x1.4p-24 * -0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xb3a00000), 0x00000001, STR(__LINE__)); // -0x1p-126 * -0x1.4p-24 = 0x1p-149
+ comp32(single_of_bits(0xb3a00000) * single_of_bits(0x00800000), 0x80000001, STR(__LINE__)); // -0x1.4p-24 * 0x1p-126 = -0x1p-149
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb3a00000), 0x80000001, STR(__LINE__)); // 0x1p-126 * -0x1.4p-24 = -0x1p-149
+}
+
+void f499(void) {
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x33a00000), 0x80000001, STR(__LINE__)); // -0x1p-126 * 0x1.4p-24 = -0x1p-149
+ comp32(single_of_bits(0x33a00000) * single_of_bits(0x80800000), 0x80000001, STR(__LINE__)); // 0x1.4p-24 * -0x1p-126 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3f000001), 0x00000001, STR(__LINE__)); // 0x1p-149 * 0x1.000002p-1 = 0x1p-149
+ comp32(single_of_bits(0x3f000001) * single_of_bits(0x00000001), 0x00000001, STR(__LINE__)); // 0x1.000002p-1 * 0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbf000001), 0x00000001, STR(__LINE__)); // -0x1p-149 * -0x1.000002p-1 = 0x1p-149
+ comp32(single_of_bits(0xbf000001) * single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // -0x1.000002p-1 * -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3f000001), 0x80000001, STR(__LINE__)); // -0x1p-149 * 0x1.000002p-1 = -0x1p-149
+ comp32(single_of_bits(0x3f000001) * single_of_bits(0x80000001), 0x80000001, STR(__LINE__)); // 0x1.000002p-1 * -0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0xbf000001) * single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1.000002p-1 * 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbf000001), 0x80000001, STR(__LINE__)); // 0x1p-149 * -0x1.000002p-1 = -0x1p-149
+}
+
+void f500(void) {
+ comp32(single_of_bits(0x40200001) * single_of_bits(0x00000001), 0x00000003, STR(__LINE__)); // 0x1.400002p+1 * 0x1p-149 = 0x1.8p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40200001), 0x00000003, STR(__LINE__)); // 0x1p-149 * 0x1.400002p+1 = 0x1.8p-148
+ comp32(single_of_bits(0xc0200001) * single_of_bits(0x80000001), 0x00000003, STR(__LINE__)); // -0x1.400002p+1 * -0x1p-149 = 0x1.8p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0200001), 0x00000003, STR(__LINE__)); // -0x1p-149 * -0x1.400002p+1 = 0x1.8p-148
+ comp32(single_of_bits(0xc0200001) * single_of_bits(0x00000001), 0x80000003, STR(__LINE__)); // -0x1.400002p+1 * 0x1p-149 = -0x1.8p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0200001), 0x80000003, STR(__LINE__)); // 0x1p-149 * -0x1.400002p+1 = -0x1.8p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40200001), 0x80000003, STR(__LINE__)); // -0x1p-149 * 0x1.400002p+1 = -0x1.8p-148
+ comp32(single_of_bits(0x40200001) * single_of_bits(0x80000001), 0x80000003, STR(__LINE__)); // 0x1.400002p+1 * -0x1p-149 = -0x1.8p-148
+ comp32(single_of_bits(0x33000001) * single_of_bits(0x00ffffff), 0x00000001, STR(__LINE__)); // 0x1.000002p-25 * 0x1.fffffep-126 = 0x1p-149
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x33000001), 0x00000001, STR(__LINE__)); // 0x1.fffffep-126 * 0x1.000002p-25 = 0x1p-149
+}
+
+void f501(void) {
+ comp32(single_of_bits(0xb3000001) * single_of_bits(0x80ffffff), 0x00000001, STR(__LINE__)); // -0x1.000002p-25 * -0x1.fffffep-126 = 0x1p-149
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0xb3000001), 0x00000001, STR(__LINE__)); // -0x1.fffffep-126 * -0x1.000002p-25 = 0x1p-149
+ comp32(single_of_bits(0xb3000001) * single_of_bits(0x00ffffff), 0x80000001, STR(__LINE__)); // -0x1.000002p-25 * 0x1.fffffep-126 = -0x1p-149
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0xb3000001), 0x80000001, STR(__LINE__)); // 0x1.fffffep-126 * -0x1.000002p-25 = -0x1p-149
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0x33000001), 0x80000001, STR(__LINE__)); // -0x1.fffffep-126 * 0x1.000002p-25 = -0x1p-149
+ comp32(single_of_bits(0x33000001) * single_of_bits(0x80ffffff), 0x80000001, STR(__LINE__)); // 0x1.000002p-25 * -0x1.fffffep-126 = -0x1p-149
+ comp32(single_of_bits(0x33c00000) * single_of_bits(0x00800000), 0x00000001, STR(__LINE__)); // 0x1.8p-24 * 0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x33c00000), 0x00000001, STR(__LINE__)); // 0x1p-126 * 0x1.8p-24 = 0x1p-149
+ comp32(single_of_bits(0xb3c00000) * single_of_bits(0x80800000), 0x00000001, STR(__LINE__)); // -0x1.8p-24 * -0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xb3c00000), 0x00000001, STR(__LINE__)); // -0x1p-126 * -0x1.8p-24 = 0x1p-149
+}
+
+void f502(void) {
+ comp32(single_of_bits(0xb3c00000) * single_of_bits(0x00800000), 0x80000001, STR(__LINE__)); // -0x1.8p-24 * 0x1p-126 = -0x1p-149
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb3c00000), 0x80000001, STR(__LINE__)); // 0x1p-126 * -0x1.8p-24 = -0x1p-149
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x33c00000), 0x80000001, STR(__LINE__)); // -0x1p-126 * 0x1.8p-24 = -0x1p-149
+ comp32(single_of_bits(0x33c00000) * single_of_bits(0x80800000), 0x80000001, STR(__LINE__)); // 0x1.8p-24 * -0x1p-126 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3f400000), 0x00000001, STR(__LINE__)); // 0x1p-149 * 0x1.8p-1 = 0x1p-149
+ comp32(single_of_bits(0x3f400000) * single_of_bits(0x00000001), 0x00000001, STR(__LINE__)); // 0x1.8p-1 * 0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbf400000), 0x00000001, STR(__LINE__)); // -0x1p-149 * -0x1.8p-1 = 0x1p-149
+ comp32(single_of_bits(0xbf400000) * single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // -0x1.8p-1 * -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3f400000), 0x80000001, STR(__LINE__)); // -0x1p-149 * 0x1.8p-1 = -0x1p-149
+ comp32(single_of_bits(0x3f400000) * single_of_bits(0x80000001), 0x80000001, STR(__LINE__)); // 0x1.8p-1 * -0x1p-149 = -0x1p-149
+}
+
+void f503(void) {
+ comp32(single_of_bits(0xbf400000) * single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1.8p-1 * 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbf400000), 0x80000001, STR(__LINE__)); // 0x1p-149 * -0x1.8p-1 = -0x1p-149
+ comp32(single_of_bits(0x33e00000) * single_of_bits(0x00800000), 0x00000001, STR(__LINE__)); // 0x1.cp-24 * 0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x33e00000), 0x00000001, STR(__LINE__)); // 0x1p-126 * 0x1.cp-24 = 0x1p-149
+ comp32(single_of_bits(0xb3e00000) * single_of_bits(0x80800000), 0x00000001, STR(__LINE__)); // -0x1.cp-24 * -0x1p-126 = 0x1p-149
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xb3e00000), 0x00000001, STR(__LINE__)); // -0x1p-126 * -0x1.cp-24 = 0x1p-149
+ comp32(single_of_bits(0xb3e00000) * single_of_bits(0x00800000), 0x80000001, STR(__LINE__)); // -0x1.cp-24 * 0x1p-126 = -0x1p-149
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xb3e00000), 0x80000001, STR(__LINE__)); // 0x1p-126 * -0x1.cp-24 = -0x1p-149
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x33e00000), 0x80000001, STR(__LINE__)); // -0x1p-126 * 0x1.cp-24 = -0x1p-149
+ comp32(single_of_bits(0x33e00000) * single_of_bits(0x80800000), 0x80000001, STR(__LINE__)); // 0x1.cp-24 * -0x1p-126 = -0x1p-149
+}
+
+void f504(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3f400001), 0x00000001, STR(__LINE__)); // 0x1p-149 * 0x1.800002p-1 = 0x1p-149
+ comp32(single_of_bits(0x3f400001) * single_of_bits(0x00000001), 0x00000001, STR(__LINE__)); // 0x1.800002p-1 * 0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbf400001), 0x00000001, STR(__LINE__)); // -0x1p-149 * -0x1.800002p-1 = 0x1p-149
+ comp32(single_of_bits(0xbf400001) * single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // -0x1.800002p-1 * -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3f400001), 0x80000001, STR(__LINE__)); // -0x1p-149 * 0x1.800002p-1 = -0x1p-149
+ comp32(single_of_bits(0x3f400001) * single_of_bits(0x80000001), 0x80000001, STR(__LINE__)); // 0x1.800002p-1 * -0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0xbf400001) * single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1.800002p-1 * 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbf400001), 0x80000001, STR(__LINE__)); // 0x1p-149 * -0x1.800002p-1 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x3f7fffff), 0x00000001, STR(__LINE__)); // 0x1p-149 * 0x1.fffffep-1 = 0x1p-149
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x00000001), 0x00000001, STR(__LINE__)); // 0x1.fffffep-1 * 0x1p-149 = 0x1p-149
+}
+
+void f505(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xbf7fffff), 0x00000001, STR(__LINE__)); // -0x1p-149 * -0x1.fffffep-1 = 0x1p-149
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x80000001), 0x00000001, STR(__LINE__)); // -0x1.fffffep-1 * -0x1p-149 = 0x1p-149
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x3f7fffff), 0x80000001, STR(__LINE__)); // -0x1p-149 * 0x1.fffffep-1 = -0x1p-149
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x80000001), 0x80000001, STR(__LINE__)); // 0x1.fffffep-1 * -0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xbf7fffff), 0x80000001, STR(__LINE__)); // 0x1p-149 * -0x1.fffffep-1 = -0x1p-149
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x00000001), 0x80000001, STR(__LINE__)); // -0x1.fffffep-1 * 0x1p-149 = -0x1p-149
+ comp32(single_of_bits(0x337fffff) * single_of_bits(0x00ffffff), 0x00000001, STR(__LINE__)); // 0x1.fffffep-25 * 0x1.fffffep-126 = 0x1p-149
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x337fffff), 0x00000001, STR(__LINE__)); // 0x1.fffffep-126 * 0x1.fffffep-25 = 0x1p-149
+ comp32(single_of_bits(0xb37fffff) * single_of_bits(0x80ffffff), 0x00000001, STR(__LINE__)); // -0x1.fffffep-25 * -0x1.fffffep-126 = 0x1p-149
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0xb37fffff), 0x00000001, STR(__LINE__)); // -0x1.fffffep-126 * -0x1.fffffep-25 = 0x1p-149
+}
+
+void f506(void) {
+ comp32(single_of_bits(0xb37fffff) * single_of_bits(0x00ffffff), 0x80000001, STR(__LINE__)); // -0x1.fffffep-25 * 0x1.fffffep-126 = -0x1p-149
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0xb37fffff), 0x80000001, STR(__LINE__)); // 0x1.fffffep-126 * -0x1.fffffep-25 = -0x1p-149
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0x337fffff), 0x80000001, STR(__LINE__)); // -0x1.fffffep-126 * 0x1.fffffep-25 = -0x1p-149
+ comp32(single_of_bits(0x337fffff) * single_of_bits(0x80ffffff), 0x80000001, STR(__LINE__)); // 0x1.fffffep-25 * -0x1.fffffep-126 = -0x1p-149
+ comp32(single_of_bits(0x007ffffc) * single_of_bits(0x3f800001), 0x007ffffd, STR(__LINE__)); // 0x1.fffffp-127 * 0x1.000002p+0 = 0x1.fffff4p-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x007ffffc), 0x007ffffd, STR(__LINE__)); // 0x1.000002p+0 * 0x1.fffffp-127 = 0x1.fffff4p-127
+ comp32(single_of_bits(0x807ffffc) * single_of_bits(0xbf800001), 0x007ffffd, STR(__LINE__)); // -0x1.fffffp-127 * -0x1.000002p+0 = 0x1.fffff4p-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x807ffffc), 0x007ffffd, STR(__LINE__)); // -0x1.000002p+0 * -0x1.fffffp-127 = 0x1.fffff4p-127
+ comp32(single_of_bits(0x807ffffc) * single_of_bits(0x3f800001), 0x807ffffd, STR(__LINE__)); // -0x1.fffffp-127 * 0x1.000002p+0 = -0x1.fffff4p-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x807ffffc), 0x807ffffd, STR(__LINE__)); // 0x1.000002p+0 * -0x1.fffffp-127 = -0x1.fffff4p-127
+}
+
+void f507(void) {
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x007ffffc), 0x807ffffd, STR(__LINE__)); // -0x1.000002p+0 * 0x1.fffffp-127 = -0x1.fffff4p-127
+ comp32(single_of_bits(0x007ffffc) * single_of_bits(0xbf800001), 0x807ffffd, STR(__LINE__)); // 0x1.fffffp-127 * -0x1.000002p+0 = -0x1.fffff4p-127
+ comp32(single_of_bits(0x3f400000) * single_of_bits(0x00800001), 0x00600001, STR(__LINE__)); // 0x1.8p-1 * 0x1.000002p-126 = 0x1.800004p-127
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x3f400000), 0x00600001, STR(__LINE__)); // 0x1.000002p-126 * 0x1.8p-1 = 0x1.800004p-127
+ comp32(single_of_bits(0xbf400000) * single_of_bits(0x80800001), 0x00600001, STR(__LINE__)); // -0x1.8p-1 * -0x1.000002p-126 = 0x1.800004p-127
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xbf400000), 0x00600001, STR(__LINE__)); // -0x1.000002p-126 * -0x1.8p-1 = 0x1.800004p-127
+ comp32(single_of_bits(0xbf400000) * single_of_bits(0x00800001), 0x80600001, STR(__LINE__)); // -0x1.8p-1 * 0x1.000002p-126 = -0x1.800004p-127
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xbf400000), 0x80600001, STR(__LINE__)); // 0x1.000002p-126 * -0x1.8p-1 = -0x1.800004p-127
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x3f400000), 0x80600001, STR(__LINE__)); // -0x1.000002p-126 * 0x1.8p-1 = -0x1.800004p-127
+ comp32(single_of_bits(0x3f400000) * single_of_bits(0x80800001), 0x80600001, STR(__LINE__)); // 0x1.8p-1 * -0x1.000002p-126 = -0x1.800004p-127
+}
+
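+/* Subnormal results also lose precision gradually: in f507, 0x1.8p-1 times
+   0x1.000002p-126 is exactly 6291456.75 units of 0x1p-149, and the fraction
+   0.75 rounds the result up to 6291457 units, i.e. 0x1.800004p-127.
+   Illustrative restatement (same single_of_bits/comp32 assumptions as
+   above): */
+void note_rounding_inside_the_subnormal_range(void) {
+ comp32(single_of_bits(0x3f400000) * single_of_bits(0x00800001), 0x00600001, STR(__LINE__)); // 0x1.8p-1 * 0x1.000002p-126 = 6291456.75 * 0x1p-149, rounded up to 0x1.800004p-127
+}
+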
+void f508(void) {
+ comp32(single_of_bits(0x007ffffe) * single_of_bits(0x3f800001), 0x007fffff, STR(__LINE__)); // 0x1.fffff8p-127 * 0x1.000002p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x007ffffe), 0x007fffff, STR(__LINE__)); // 0x1.000002p+0 * 0x1.fffff8p-127 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x807ffffe) * single_of_bits(0xbf800001), 0x007fffff, STR(__LINE__)); // -0x1.fffff8p-127 * -0x1.000002p+0 = 0x1.fffffcp-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x807ffffe), 0x007fffff, STR(__LINE__)); // -0x1.000002p+0 * -0x1.fffff8p-127 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x807ffffe) * single_of_bits(0x3f800001), 0x807fffff, STR(__LINE__)); // -0x1.fffff8p-127 * 0x1.000002p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x807ffffe), 0x807fffff, STR(__LINE__)); // 0x1.000002p+0 * -0x1.fffff8p-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x007ffffe), 0x807fffff, STR(__LINE__)); // -0x1.000002p+0 * 0x1.fffff8p-127 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x007ffffe) * single_of_bits(0xbf800001), 0x807fffff, STR(__LINE__)); // 0x1.fffff8p-127 * -0x1.000002p+0 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x007ffff8) * single_of_bits(0x3f800001), 0x007ffff9, STR(__LINE__)); // 0x1.ffffep-127 * 0x1.000002p+0 = 0x1.ffffe4p-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x007ffff8), 0x007ffff9, STR(__LINE__)); // 0x1.000002p+0 * 0x1.ffffep-127 = 0x1.ffffe4p-127
+}
+
+void f509(void) {
+ comp32(single_of_bits(0x807ffff8) * single_of_bits(0xbf800001), 0x007ffff9, STR(__LINE__)); // -0x1.ffffep-127 * -0x1.000002p+0 = 0x1.ffffe4p-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x807ffff8), 0x007ffff9, STR(__LINE__)); // -0x1.000002p+0 * -0x1.ffffep-127 = 0x1.ffffe4p-127
+ comp32(single_of_bits(0x807ffff8) * single_of_bits(0x3f800001), 0x807ffff9, STR(__LINE__)); // -0x1.ffffep-127 * 0x1.000002p+0 = -0x1.ffffe4p-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x807ffff8), 0x807ffff9, STR(__LINE__)); // 0x1.000002p+0 * -0x1.ffffep-127 = -0x1.ffffe4p-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x007ffff8), 0x807ffff9, STR(__LINE__)); // -0x1.000002p+0 * 0x1.ffffep-127 = -0x1.ffffe4p-127
+ comp32(single_of_bits(0x007ffff8) * single_of_bits(0xbf800001), 0x807ffff9, STR(__LINE__)); // 0x1.ffffep-127 * -0x1.000002p+0 = -0x1.ffffe4p-127
+ comp32(single_of_bits(0x007ffff7) * single_of_bits(0x3f800001), 0x007ffff8, STR(__LINE__)); // 0x1.ffffdcp-127 * 0x1.000002p+0 = 0x1.ffffep-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x007ffff7), 0x007ffff8, STR(__LINE__)); // 0x1.000002p+0 * 0x1.ffffdcp-127 = 0x1.ffffep-127
+ comp32(single_of_bits(0x807ffff7) * single_of_bits(0xbf800001), 0x007ffff8, STR(__LINE__)); // -0x1.ffffdcp-127 * -0x1.000002p+0 = 0x1.ffffep-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x807ffff7), 0x007ffff8, STR(__LINE__)); // -0x1.000002p+0 * -0x1.ffffdcp-127 = 0x1.ffffep-127
+}
+
+void f510(void) {
+ comp32(single_of_bits(0x807ffff7) * single_of_bits(0x3f800001), 0x807ffff8, STR(__LINE__)); // -0x1.ffffdcp-127 * 0x1.000002p+0 = -0x1.ffffep-127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x807ffff7), 0x807ffff8, STR(__LINE__)); // 0x1.000002p+0 * -0x1.ffffdcp-127 = -0x1.ffffep-127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x007ffff7), 0x807ffff8, STR(__LINE__)); // -0x1.000002p+0 * 0x1.ffffdcp-127 = -0x1.ffffep-127
+ comp32(single_of_bits(0x007ffff7) * single_of_bits(0xbf800001), 0x807ffff8, STR(__LINE__)); // 0x1.ffffdcp-127 * -0x1.000002p+0 = -0x1.ffffep-127
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x3f7ffffa), 0x007ffffe, STR(__LINE__)); // 0x1.000002p-126 * 0x1.fffff4p-1 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x3f7ffffa) * single_of_bits(0x00800001), 0x007ffffe, STR(__LINE__)); // 0x1.fffff4p-1 * 0x1.000002p-126 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xbf7ffffa), 0x007ffffe, STR(__LINE__)); // -0x1.000002p-126 * -0x1.fffff4p-1 = 0x1.fffff8p-127
+ comp32(single_of_bits(0xbf7ffffa) * single_of_bits(0x80800001), 0x007ffffe, STR(__LINE__)); // -0x1.fffff4p-1 * -0x1.000002p-126 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x3f7ffffa), 0x807ffffe, STR(__LINE__)); // -0x1.000002p-126 * 0x1.fffff4p-1 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x3f7ffffa) * single_of_bits(0x80800001), 0x807ffffe, STR(__LINE__)); // 0x1.fffff4p-1 * -0x1.000002p-126 = -0x1.fffff8p-127
+}
+
+void f511(void) {
+ comp32(single_of_bits(0xbf7ffffa) * single_of_bits(0x00800001), 0x807ffffe, STR(__LINE__)); // -0x1.fffff4p-1 * 0x1.000002p-126 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xbf7ffffa), 0x807ffffe, STR(__LINE__)); // 0x1.000002p-126 * -0x1.fffff4p-1 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x007ffffe) * single_of_bits(0x3f7ffffc), 0x007ffffc, STR(__LINE__)); // 0x1.fffff8p-127 * 0x1.fffff8p-1 = 0x1.fffffp-127
+ comp32(single_of_bits(0x3f7ffffc) * single_of_bits(0x007ffffe), 0x007ffffc, STR(__LINE__)); // 0x1.fffff8p-1 * 0x1.fffff8p-127 = 0x1.fffffp-127
+ comp32(single_of_bits(0x807ffffe) * single_of_bits(0xbf7ffffc), 0x007ffffc, STR(__LINE__)); // -0x1.fffff8p-127 * -0x1.fffff8p-1 = 0x1.fffffp-127
+ comp32(single_of_bits(0xbf7ffffc) * single_of_bits(0x807ffffe), 0x007ffffc, STR(__LINE__)); // -0x1.fffff8p-1 * -0x1.fffff8p-127 = 0x1.fffffp-127
+ comp32(single_of_bits(0x807ffffe) * single_of_bits(0x3f7ffffc), 0x807ffffc, STR(__LINE__)); // -0x1.fffff8p-127 * 0x1.fffff8p-1 = -0x1.fffffp-127
+ comp32(single_of_bits(0x3f7ffffc) * single_of_bits(0x807ffffe), 0x807ffffc, STR(__LINE__)); // 0x1.fffff8p-1 * -0x1.fffff8p-127 = -0x1.fffffp-127
+ comp32(single_of_bits(0xbf7ffffc) * single_of_bits(0x007ffffe), 0x807ffffc, STR(__LINE__)); // -0x1.fffff8p-1 * 0x1.fffff8p-127 = -0x1.fffffp-127
+ comp32(single_of_bits(0x007ffffe) * single_of_bits(0xbf7ffffc), 0x807ffffc, STR(__LINE__)); // 0x1.fffff8p-127 * -0x1.fffff8p-1 = -0x1.fffffp-127
+}
+
+void f512(void) {
+ comp32(single_of_bits(0x3effffff) * single_of_bits(0x00800003), 0x00400001, STR(__LINE__)); // 0x1.fffffep-2 * 0x1.000006p-126 = 0x1.000004p-127
+ comp32(single_of_bits(0x00800003) * single_of_bits(0x3effffff), 0x00400001, STR(__LINE__)); // 0x1.000006p-126 * 0x1.fffffep-2 = 0x1.000004p-127
+ comp32(single_of_bits(0xbeffffff) * single_of_bits(0x80800003), 0x00400001, STR(__LINE__)); // -0x1.fffffep-2 * -0x1.000006p-126 = 0x1.000004p-127
+ comp32(single_of_bits(0x80800003) * single_of_bits(0xbeffffff), 0x00400001, STR(__LINE__)); // -0x1.000006p-126 * -0x1.fffffep-2 = 0x1.000004p-127
+ comp32(single_of_bits(0xbeffffff) * single_of_bits(0x00800003), 0x80400001, STR(__LINE__)); // -0x1.fffffep-2 * 0x1.000006p-126 = -0x1.000004p-127
+ comp32(single_of_bits(0x00800003) * single_of_bits(0xbeffffff), 0x80400001, STR(__LINE__)); // 0x1.000006p-126 * -0x1.fffffep-2 = -0x1.000004p-127
+ comp32(single_of_bits(0x80800003) * single_of_bits(0x3effffff), 0x80400001, STR(__LINE__)); // -0x1.000006p-126 * 0x1.fffffep-2 = -0x1.000004p-127
+ comp32(single_of_bits(0x3effffff) * single_of_bits(0x80800003), 0x80400001, STR(__LINE__)); // 0x1.fffffep-2 * -0x1.000006p-126 = -0x1.000004p-127
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x3f800001), 0x00800000, STR(__LINE__)); // 0x1.fffffcp-127 * 0x1.000002p+0 = 0x1p-126
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x007fffff), 0x00800000, STR(__LINE__)); // 0x1.000002p+0 * 0x1.fffffcp-127 = 0x1p-126
+}
+
+void f513(void) {
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0xbf800001), 0x00800000, STR(__LINE__)); // -0x1.fffffcp-127 * -0x1.000002p+0 = 0x1p-126
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x807fffff), 0x00800000, STR(__LINE__)); // -0x1.000002p+0 * -0x1.fffffcp-127 = 0x1p-126
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x3f800001), 0x80800000, STR(__LINE__)); // -0x1.fffffcp-127 * 0x1.000002p+0 = -0x1p-126
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x807fffff), 0x80800000, STR(__LINE__)); // 0x1.000002p+0 * -0x1.fffffcp-127 = -0x1p-126
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x007fffff), 0x80800000, STR(__LINE__)); // -0x1.000002p+0 * 0x1.fffffcp-127 = -0x1p-126
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0xbf800001), 0x80800000, STR(__LINE__)); // 0x1.fffffcp-127 * -0x1.000002p+0 = -0x1p-126
+ comp32(single_of_bits(0x3f800008) * single_of_bits(0x007ffff8), 0x00800000, STR(__LINE__)); // 0x1.00001p+0 * 0x1.ffffep-127 = 0x1p-126
+ comp32(single_of_bits(0x007ffff8) * single_of_bits(0x3f800008), 0x00800000, STR(__LINE__)); // 0x1.ffffep-127 * 0x1.00001p+0 = 0x1p-126
+ comp32(single_of_bits(0x807ffff8) * single_of_bits(0xbf800008), 0x00800000, STR(__LINE__)); // -0x1.ffffep-127 * -0x1.00001p+0 = 0x1p-126
+ comp32(single_of_bits(0xbf800008) * single_of_bits(0x807ffff8), 0x00800000, STR(__LINE__)); // -0x1.00001p+0 * -0x1.ffffep-127 = 0x1p-126
+}
+
+void f514(void) {
+ comp32(single_of_bits(0x807ffff8) * single_of_bits(0x3f800008), 0x80800000, STR(__LINE__)); // -0x1.ffffep-127 * 0x1.00001p+0 = -0x1p-126
+ comp32(single_of_bits(0x3f800008) * single_of_bits(0x807ffff8), 0x80800000, STR(__LINE__)); // 0x1.00001p+0 * -0x1.ffffep-127 = -0x1p-126
+ comp32(single_of_bits(0xbf800008) * single_of_bits(0x007ffff8), 0x80800000, STR(__LINE__)); // -0x1.00001p+0 * 0x1.ffffep-127 = -0x1p-126
+ comp32(single_of_bits(0x007ffff8) * single_of_bits(0xbf800008), 0x80800000, STR(__LINE__)); // 0x1.ffffep-127 * -0x1.00001p+0 = -0x1p-126
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x3f7ffffe), 0x00800000, STR(__LINE__)); // 0x1.000002p-126 * 0x1.fffffcp-1 = 0x1p-126
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x00800001), 0x00800000, STR(__LINE__)); // 0x1.fffffcp-1 * 0x1.000002p-126 = 0x1p-126
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xbf7ffffe), 0x00800000, STR(__LINE__)); // -0x1.000002p-126 * -0x1.fffffcp-1 = 0x1p-126
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x80800001), 0x00800000, STR(__LINE__)); // -0x1.fffffcp-1 * -0x1.000002p-126 = 0x1p-126
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x3f7ffffe), 0x80800000, STR(__LINE__)); // -0x1.000002p-126 * 0x1.fffffcp-1 = -0x1p-126
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x80800001), 0x80800000, STR(__LINE__)); // 0x1.fffffcp-1 * -0x1.000002p-126 = -0x1p-126
+}
+
+void f515(void) {
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x00800001), 0x80800000, STR(__LINE__)); // -0x1.fffffcp-1 * 0x1.000002p-126 = -0x1p-126
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xbf7ffffe), 0x80800000, STR(__LINE__)); // 0x1.000002p-126 * -0x1.fffffcp-1 = -0x1p-126
+ comp32(single_of_bits(0x00800002) * single_of_bits(0x3f7ffffc), 0x00800000, STR(__LINE__)); // 0x1.000004p-126 * 0x1.fffff8p-1 = 0x1p-126
+ comp32(single_of_bits(0x3f7ffffc) * single_of_bits(0x00800002), 0x00800000, STR(__LINE__)); // 0x1.fffff8p-1 * 0x1.000004p-126 = 0x1p-126
+ comp32(single_of_bits(0x80800002) * single_of_bits(0xbf7ffffc), 0x00800000, STR(__LINE__)); // -0x1.000004p-126 * -0x1.fffff8p-1 = 0x1p-126
+ comp32(single_of_bits(0xbf7ffffc) * single_of_bits(0x80800002), 0x00800000, STR(__LINE__)); // -0x1.fffff8p-1 * -0x1.000004p-126 = 0x1p-126
+ comp32(single_of_bits(0x80800002) * single_of_bits(0x3f7ffffc), 0x80800000, STR(__LINE__)); // -0x1.000004p-126 * 0x1.fffff8p-1 = -0x1p-126
+ comp32(single_of_bits(0x3f7ffffc) * single_of_bits(0x80800002), 0x80800000, STR(__LINE__)); // 0x1.fffff8p-1 * -0x1.000004p-126 = -0x1p-126
+ comp32(single_of_bits(0xbf7ffffc) * single_of_bits(0x00800002), 0x80800000, STR(__LINE__)); // -0x1.fffff8p-1 * 0x1.000004p-126 = -0x1p-126
+ comp32(single_of_bits(0x00800002) * single_of_bits(0xbf7ffffc), 0x80800000, STR(__LINE__)); // 0x1.000004p-126 * -0x1.fffff8p-1 = -0x1p-126
+}
+
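+/* Several f512-f515 cases land exactly on the subnormal/normal boundary:
+   0x1.fffffcp-127 is 8388607 units of 0x1p-149, and multiplying it by
+   0x1.000002p+0 gives just under 8388608 units, which rounds up to exactly
+   8388608 units, i.e. the smallest normal 0x1p-126 (bits 0x00800000).
+   Illustrative restatement (same single_of_bits/comp32 assumptions as
+   above): */
+void note_rounding_up_to_the_smallest_normal(void) {
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x3f800001), 0x00800000, STR(__LINE__)); // 8388607 * (1 + 0x1p-23) units of 0x1p-149 rounds to 8388608 units = 0x1p-126
+}
+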
+void f516(void) {
+ comp32(single_of_bits(0x3f000002) * single_of_bits(0x00fffffb), 0x007fffff, STR(__LINE__)); // 0x1.000004p-1 * 0x1.fffff6p-126 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x00fffffb) * single_of_bits(0x3f000002), 0x007fffff, STR(__LINE__)); // 0x1.fffff6p-126 * 0x1.000004p-1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0xbf000002) * single_of_bits(0x80fffffb), 0x007fffff, STR(__LINE__)); // -0x1.000004p-1 * -0x1.fffff6p-126 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80fffffb) * single_of_bits(0xbf000002), 0x007fffff, STR(__LINE__)); // -0x1.fffff6p-126 * -0x1.000004p-1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0xbf000002) * single_of_bits(0x00fffffb), 0x807fffff, STR(__LINE__)); // -0x1.000004p-1 * 0x1.fffff6p-126 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00fffffb) * single_of_bits(0xbf000002), 0x807fffff, STR(__LINE__)); // 0x1.fffff6p-126 * -0x1.000004p-1 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x80fffffb) * single_of_bits(0x3f000002), 0x807fffff, STR(__LINE__)); // -0x1.fffff6p-126 * 0x1.000004p-1 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x3f000002) * single_of_bits(0x80fffffb), 0x807fffff, STR(__LINE__)); // 0x1.000004p-1 * -0x1.fffff6p-126 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x3f000000), 0x00800000, STR(__LINE__)); // 0x1.fffffep-126 * 0x1p-1 = 0x1p-126
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00ffffff), 0x00800000, STR(__LINE__)); // 0x1p-1 * 0x1.fffffep-126 = 0x1p-126
+}
+
+void f517(void) {
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0xbf000000), 0x00800000, STR(__LINE__)); // -0x1.fffffep-126 * -0x1p-1 = 0x1p-126
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80ffffff), 0x00800000, STR(__LINE__)); // -0x1p-1 * -0x1.fffffep-126 = 0x1p-126
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0x3f000000), 0x80800000, STR(__LINE__)); // -0x1.fffffep-126 * 0x1p-1 = -0x1p-126
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80ffffff), 0x80800000, STR(__LINE__)); // 0x1p-1 * -0x1.fffffep-126 = -0x1p-126
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00ffffff), 0x80800000, STR(__LINE__)); // -0x1p-1 * 0x1.fffffep-126 = -0x1p-126
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0xbf000000), 0x80800000, STR(__LINE__)); // 0x1.fffffep-126 * -0x1p-1 = -0x1p-126
+ comp32(single_of_bits(0x00800000) * single_of_bits(0x3f7ffffe), 0x007fffff, STR(__LINE__)); // 0x1p-126 * 0x1.fffffcp-1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x00800000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-1 * 0x1p-126 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80800000) * single_of_bits(0xbf7ffffe), 0x007fffff, STR(__LINE__)); // -0x1p-126 * -0x1.fffffcp-1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x80800000), 0x007fffff, STR(__LINE__)); // -0x1.fffffcp-1 * -0x1p-126 = 0x1.fffffcp-127
+}
+
+void f518(void) {
+ comp32(single_of_bits(0x80800000) * single_of_bits(0x3f7ffffe), 0x807fffff, STR(__LINE__)); // -0x1p-126 * 0x1.fffffcp-1 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x80800000), 0x807fffff, STR(__LINE__)); // 0x1.fffffcp-1 * -0x1p-126 = -0x1.fffffcp-127
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x00800000), 0x807fffff, STR(__LINE__)); // -0x1.fffffcp-1 * 0x1p-126 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00800000) * single_of_bits(0xbf7ffffe), 0x807fffff, STR(__LINE__)); // 0x1p-126 * -0x1.fffffcp-1 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00fffffe) * single_of_bits(0x3f000000), 0x007fffff, STR(__LINE__)); // 0x1.fffffcp-126 * 0x1p-1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00fffffe), 0x007fffff, STR(__LINE__)); // 0x1p-1 * 0x1.fffffcp-126 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80fffffe) * single_of_bits(0xbf000000), 0x007fffff, STR(__LINE__)); // -0x1.fffffcp-126 * -0x1p-1 = 0x1.fffffcp-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80fffffe), 0x007fffff, STR(__LINE__)); // -0x1p-1 * -0x1.fffffcp-126 = 0x1.fffffcp-127
+ comp32(single_of_bits(0x80fffffe) * single_of_bits(0x3f000000), 0x807fffff, STR(__LINE__)); // -0x1.fffffcp-126 * 0x1p-1 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80fffffe), 0x807fffff, STR(__LINE__)); // 0x1p-1 * -0x1.fffffcp-126 = -0x1.fffffcp-127
+}
+
+void f519(void) {
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00fffffe), 0x807fffff, STR(__LINE__)); // -0x1p-1 * 0x1.fffffcp-126 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00fffffe) * single_of_bits(0xbf000000), 0x807fffff, STR(__LINE__)); // 0x1.fffffcp-126 * -0x1p-1 = -0x1.fffffcp-127
+ comp32(single_of_bits(0x00fffffc) * single_of_bits(0x3f000000), 0x007ffffe, STR(__LINE__)); // 0x1.fffff8p-126 * 0x1p-1 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00fffffc), 0x007ffffe, STR(__LINE__)); // 0x1p-1 * 0x1.fffff8p-126 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x80fffffc) * single_of_bits(0xbf000000), 0x007ffffe, STR(__LINE__)); // -0x1.fffff8p-126 * -0x1p-1 = 0x1.fffff8p-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80fffffc), 0x007ffffe, STR(__LINE__)); // -0x1p-1 * -0x1.fffff8p-126 = 0x1.fffff8p-127
+ comp32(single_of_bits(0x80fffffc) * single_of_bits(0x3f000000), 0x807ffffe, STR(__LINE__)); // -0x1.fffff8p-126 * 0x1p-1 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80fffffc), 0x807ffffe, STR(__LINE__)); // 0x1p-1 * -0x1.fffff8p-126 = -0x1.fffff8p-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00fffffc), 0x807ffffe, STR(__LINE__)); // -0x1p-1 * 0x1.fffff8p-126 = -0x1.fffff8p-127
+ comp32(single_of_bits(0x00fffffc) * single_of_bits(0xbf000000), 0x807ffffe, STR(__LINE__)); // 0x1.fffff8p-126 * -0x1p-1 = -0x1.fffff8p-127
+}
+
+void f520(void) {
+ comp32(single_of_bits(0x00fffff8) * single_of_bits(0x3f000000), 0x007ffffc, STR(__LINE__)); // 0x1.fffffp-126 * 0x1p-1 = 0x1.fffffp-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00fffff8), 0x007ffffc, STR(__LINE__)); // 0x1p-1 * 0x1.fffffp-126 = 0x1.fffffp-127
+ comp32(single_of_bits(0x80fffff8) * single_of_bits(0xbf000000), 0x007ffffc, STR(__LINE__)); // -0x1.fffffp-126 * -0x1p-1 = 0x1.fffffp-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80fffff8), 0x007ffffc, STR(__LINE__)); // -0x1p-1 * -0x1.fffffp-126 = 0x1.fffffp-127
+ comp32(single_of_bits(0x80fffff8) * single_of_bits(0x3f000000), 0x807ffffc, STR(__LINE__)); // -0x1.fffffp-126 * 0x1p-1 = -0x1.fffffp-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80fffff8), 0x807ffffc, STR(__LINE__)); // 0x1p-1 * -0x1.fffffp-126 = -0x1.fffffp-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00fffff8), 0x807ffffc, STR(__LINE__)); // -0x1p-1 * 0x1.fffffp-126 = -0x1.fffffp-127
+ comp32(single_of_bits(0x00fffff8) * single_of_bits(0xbf000000), 0x807ffffc, STR(__LINE__)); // 0x1.fffffp-126 * -0x1p-1 = -0x1.fffffp-127
+ comp32(single_of_bits(0x00000008) * single_of_bits(0x3e000000), 0x00000001, STR(__LINE__)); // 0x1p-146 * 0x1p-3 = 0x1p-149
+ comp32(single_of_bits(0x3e000000) * single_of_bits(0x00000008), 0x00000001, STR(__LINE__)); // 0x1p-3 * 0x1p-146 = 0x1p-149
+}
+
+void f521(void) {
+ comp32(single_of_bits(0x80000008) * single_of_bits(0xbe000000), 0x00000001, STR(__LINE__)); // -0x1p-146 * -0x1p-3 = 0x1p-149
+ comp32(single_of_bits(0xbe000000) * single_of_bits(0x80000008), 0x00000001, STR(__LINE__)); // -0x1p-3 * -0x1p-146 = 0x1p-149
+ comp32(single_of_bits(0x80000008) * single_of_bits(0x3e000000), 0x80000001, STR(__LINE__)); // -0x1p-146 * 0x1p-3 = -0x1p-149
+ comp32(single_of_bits(0x3e000000) * single_of_bits(0x80000008), 0x80000001, STR(__LINE__)); // 0x1p-3 * -0x1p-146 = -0x1p-149
+ comp32(single_of_bits(0xbe000000) * single_of_bits(0x00000008), 0x80000001, STR(__LINE__)); // -0x1p-3 * 0x1p-146 = -0x1p-149
+ comp32(single_of_bits(0x00000008) * single_of_bits(0xbe000000), 0x80000001, STR(__LINE__)); // 0x1p-146 * -0x1p-3 = -0x1p-149
+ comp32(single_of_bits(0x00000006) * single_of_bits(0x3f000000), 0x00000003, STR(__LINE__)); // 0x1.8p-147 * 0x1p-1 = 0x1.8p-148
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00000006), 0x00000003, STR(__LINE__)); // 0x1p-1 * 0x1.8p-147 = 0x1.8p-148
+ comp32(single_of_bits(0x80000006) * single_of_bits(0xbf000000), 0x00000003, STR(__LINE__)); // -0x1.8p-147 * -0x1p-1 = 0x1.8p-148
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80000006), 0x00000003, STR(__LINE__)); // -0x1p-1 * -0x1.8p-147 = 0x1.8p-148
+}
+
+void f522(void) {
+ comp32(single_of_bits(0x80000006) * single_of_bits(0x3f000000), 0x80000003, STR(__LINE__)); // -0x1.8p-147 * 0x1p-1 = -0x1.8p-148
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80000006), 0x80000003, STR(__LINE__)); // 0x1p-1 * -0x1.8p-147 = -0x1.8p-148
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00000006), 0x80000003, STR(__LINE__)); // -0x1p-1 * 0x1.8p-147 = -0x1.8p-148
+ comp32(single_of_bits(0x00000006) * single_of_bits(0xbf000000), 0x80000003, STR(__LINE__)); // 0x1.8p-147 * -0x1p-1 = -0x1.8p-148
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0x3f7fffff), 0x403ffffe, STR(__LINE__)); // 0x1.7ffffep+1 * 0x1.fffffep-1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x403fffff), 0x403ffffe, STR(__LINE__)); // 0x1.fffffep-1 * 0x1.7ffffep+1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0xbf7fffff), 0x403ffffe, STR(__LINE__)); // -0x1.7ffffep+1 * -0x1.fffffep-1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0xc03fffff), 0x403ffffe, STR(__LINE__)); // -0x1.fffffep-1 * -0x1.7ffffep+1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0x3f7fffff), 0xc03ffffe, STR(__LINE__)); // -0x1.7ffffep+1 * 0x1.fffffep-1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0xc03fffff), 0xc03ffffe, STR(__LINE__)); // 0x1.fffffep-1 * -0x1.7ffffep+1 = -0x1.7ffffcp+1
+}
+
+void f523(void) {
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x403fffff), 0xc03ffffe, STR(__LINE__)); // -0x1.fffffep-1 * 0x1.7ffffep+1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0xbf7fffff), 0xc03ffffe, STR(__LINE__)); // 0x1.7ffffep+1 * -0x1.fffffep-1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0x409fffff) * single_of_bits(0x3f7fffff), 0x409ffffe, STR(__LINE__)); // 0x1.3ffffep+2 * 0x1.fffffep-1 = 0x1.3ffffcp+2
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x409fffff), 0x409ffffe, STR(__LINE__)); // 0x1.fffffep-1 * 0x1.3ffffep+2 = 0x1.3ffffcp+2
+ comp32(single_of_bits(0xc09fffff) * single_of_bits(0xbf7fffff), 0x409ffffe, STR(__LINE__)); // -0x1.3ffffep+2 * -0x1.fffffep-1 = 0x1.3ffffcp+2
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0xc09fffff), 0x409ffffe, STR(__LINE__)); // -0x1.fffffep-1 * -0x1.3ffffep+2 = 0x1.3ffffcp+2
+ comp32(single_of_bits(0xc09fffff) * single_of_bits(0x3f7fffff), 0xc09ffffe, STR(__LINE__)); // -0x1.3ffffep+2 * 0x1.fffffep-1 = -0x1.3ffffcp+2
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0xc09fffff), 0xc09ffffe, STR(__LINE__)); // 0x1.fffffep-1 * -0x1.3ffffep+2 = -0x1.3ffffcp+2
+ comp32(single_of_bits(0x409fffff) * single_of_bits(0xbf7fffff), 0xc09ffffe, STR(__LINE__)); // 0x1.3ffffep+2 * -0x1.fffffep-1 = -0x1.3ffffcp+2
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x409fffff), 0xc09ffffe, STR(__LINE__)); // -0x1.fffffep-1 * 0x1.3ffffep+2 = -0x1.3ffffcp+2
+}
+
+void f524(void) {
+ comp32(single_of_bits(0x40dfffff) * single_of_bits(0x3f7fffff), 0x40dffffe, STR(__LINE__)); // 0x1.bffffep+2 * 0x1.fffffep-1 = 0x1.bffffcp+2
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x40dfffff), 0x40dffffe, STR(__LINE__)); // 0x1.fffffep-1 * 0x1.bffffep+2 = 0x1.bffffcp+2
+ comp32(single_of_bits(0xc0dfffff) * single_of_bits(0xbf7fffff), 0x40dffffe, STR(__LINE__)); // -0x1.bffffep+2 * -0x1.fffffep-1 = 0x1.bffffcp+2
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0xc0dfffff), 0x40dffffe, STR(__LINE__)); // -0x1.fffffep-1 * -0x1.bffffep+2 = 0x1.bffffcp+2
+ comp32(single_of_bits(0xc0dfffff) * single_of_bits(0x3f7fffff), 0xc0dffffe, STR(__LINE__)); // -0x1.bffffep+2 * 0x1.fffffep-1 = -0x1.bffffcp+2
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0xc0dfffff), 0xc0dffffe, STR(__LINE__)); // 0x1.fffffep-1 * -0x1.bffffep+2 = -0x1.bffffcp+2
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x40dfffff), 0xc0dffffe, STR(__LINE__)); // -0x1.fffffep-1 * 0x1.bffffep+2 = -0x1.bffffcp+2
+ comp32(single_of_bits(0x40dfffff) * single_of_bits(0xbf7fffff), 0xc0dffffe, STR(__LINE__)); // 0x1.bffffep+2 * -0x1.fffffep-1 = -0x1.bffffcp+2
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0x3f7fffff), 0x00fffffe, STR(__LINE__)); // 0x1.fffffep-126 * 0x1.fffffep-1 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x00ffffff), 0x00fffffe, STR(__LINE__)); // 0x1.fffffep-1 * 0x1.fffffep-126 = 0x1.fffffcp-126
+}
+
+void f525(void) {
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0xbf7fffff), 0x00fffffe, STR(__LINE__)); // -0x1.fffffep-126 * -0x1.fffffep-1 = 0x1.fffffcp-126
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x80ffffff), 0x00fffffe, STR(__LINE__)); // -0x1.fffffep-1 * -0x1.fffffep-126 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x80ffffff) * single_of_bits(0x3f7fffff), 0x80fffffe, STR(__LINE__)); // -0x1.fffffep-126 * 0x1.fffffep-1 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x80ffffff), 0x80fffffe, STR(__LINE__)); // 0x1.fffffep-1 * -0x1.fffffep-126 = -0x1.fffffcp-126
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x00ffffff), 0x80fffffe, STR(__LINE__)); // -0x1.fffffep-1 * 0x1.fffffep-126 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x00ffffff) * single_of_bits(0xbf7fffff), 0x80fffffe, STR(__LINE__)); // 0x1.fffffep-126 * -0x1.fffffep-1 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x7e7ffff9) * single_of_bits(0x407fffff), 0x7f7ffff8, STR(__LINE__)); // 0x1.fffff2p+125 * 0x1.fffffep+1 = 0x1.fffffp+127
+ comp32(single_of_bits(0x407fffff) * single_of_bits(0x7e7ffff9), 0x7f7ffff8, STR(__LINE__)); // 0x1.fffffep+1 * 0x1.fffff2p+125 = 0x1.fffffp+127
+ comp32(single_of_bits(0xfe7ffff9) * single_of_bits(0xc07fffff), 0x7f7ffff8, STR(__LINE__)); // -0x1.fffff2p+125 * -0x1.fffffep+1 = 0x1.fffffp+127
+ comp32(single_of_bits(0xc07fffff) * single_of_bits(0xfe7ffff9), 0x7f7ffff8, STR(__LINE__)); // -0x1.fffffep+1 * -0x1.fffff2p+125 = 0x1.fffffp+127
+}
+
+void f526(void) {
+ comp32(single_of_bits(0xfe7ffff9) * single_of_bits(0x407fffff), 0xff7ffff8, STR(__LINE__)); // -0x1.fffff2p+125 * 0x1.fffffep+1 = -0x1.fffffp+127
+ comp32(single_of_bits(0x407fffff) * single_of_bits(0xfe7ffff9), 0xff7ffff8, STR(__LINE__)); // 0x1.fffffep+1 * -0x1.fffff2p+125 = -0x1.fffffp+127
+ comp32(single_of_bits(0xc07fffff) * single_of_bits(0x7e7ffff9), 0xff7ffff8, STR(__LINE__)); // -0x1.fffffep+1 * 0x1.fffff2p+125 = -0x1.fffffp+127
+ comp32(single_of_bits(0x7e7ffff9) * single_of_bits(0xc07fffff), 0xff7ffff8, STR(__LINE__)); // 0x1.fffff2p+125 * -0x1.fffffep+1 = -0x1.fffffp+127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x3f800001), 0x3f800002, STR(__LINE__)); // 0x1.000002p+0 * 0x1.000002p+0 = 0x1.000004p+0
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xbf800001), 0x3f800002, STR(__LINE__)); // -0x1.000002p+0 * -0x1.000002p+0 = 0x1.000004p+0
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x3f800001), 0xbf800002, STR(__LINE__)); // -0x1.000002p+0 * 0x1.000002p+0 = -0x1.000004p+0
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xbf800001), 0xbf800002, STR(__LINE__)); // 0x1.000002p+0 * -0x1.000002p+0 = -0x1.000004p+0
+ comp32(single_of_bits(0x3f800002) * single_of_bits(0x3f800001), 0x3f800003, STR(__LINE__)); // 0x1.000004p+0 * 0x1.000002p+0 = 0x1.000006p+0
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x3f800002), 0x3f800003, STR(__LINE__)); // 0x1.000002p+0 * 0x1.000004p+0 = 0x1.000006p+0
+}
+
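+/* The f525/f526 cases stay just inside the finite range: the exact product
+   0x1.fffff2p+125 * 0x1.fffffep+1 is 16777208 + 7 * 0x1p-24 units of 0x1p+104,
+   which rounds to 16777208 units, i.e. 0x1.fffffp+127, still below FLT_MAX
+   (0x1.fffffep+127), so no overflow to infinity occurs.  Illustrative
+   restatement (same single_of_bits/comp32 assumptions as above): */
+void note_large_product_without_overflow(void) {
+ comp32(single_of_bits(0x7e7ffff9) * single_of_bits(0x407fffff), 0x7f7ffff8, STR(__LINE__)); // 0x1.fffff2p+125 * 0x1.fffffep+1 rounds to 0x1.fffffp+127 < FLT_MAX
+}
+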
+void f527(void) {
+ comp32(single_of_bits(0xbf800002) * single_of_bits(0xbf800001), 0x3f800003, STR(__LINE__)); // -0x1.000004p+0 * -0x1.000002p+0 = 0x1.000006p+0
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xbf800002), 0x3f800003, STR(__LINE__)); // -0x1.000002p+0 * -0x1.000004p+0 = 0x1.000006p+0
+ comp32(single_of_bits(0xbf800002) * single_of_bits(0x3f800001), 0xbf800003, STR(__LINE__)); // -0x1.000004p+0 * 0x1.000002p+0 = -0x1.000006p+0
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xbf800002), 0xbf800003, STR(__LINE__)); // 0x1.000002p+0 * -0x1.000004p+0 = -0x1.000006p+0
+ comp32(single_of_bits(0x3f800002) * single_of_bits(0xbf800001), 0xbf800003, STR(__LINE__)); // 0x1.000004p+0 * -0x1.000002p+0 = -0x1.000006p+0
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x3f800002), 0xbf800003, STR(__LINE__)); // -0x1.000002p+0 * 0x1.000004p+0 = -0x1.000006p+0
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0xff7fffff), 0x7f7ffffe, STR(__LINE__)); // -0x1.fffffep-1 * -0x1.fffffep+127 = 0x1.fffffcp+127
+ comp32(single_of_bits(0xff7fffff) * single_of_bits(0xbf7fffff), 0x7f7ffffe, STR(__LINE__)); // -0x1.fffffep+127 * -0x1.fffffep-1 = 0x1.fffffcp+127
+ comp32(single_of_bits(0x00400001) * single_of_bits(0x40000001), 0x00800003, STR(__LINE__)); // 0x1.000004p-127 * 0x1.000002p+1 = 0x1.000006p-126
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x00400001), 0x00800003, STR(__LINE__)); // 0x1.000002p+1 * 0x1.000004p-127 = 0x1.000006p-126
+}
+
+void f528(void) {
+ comp32(single_of_bits(0x80400001) * single_of_bits(0xc0000001), 0x00800003, STR(__LINE__)); // -0x1.000004p-127 * -0x1.000002p+1 = 0x1.000006p-126
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x80400001), 0x00800003, STR(__LINE__)); // -0x1.000002p+1 * -0x1.000004p-127 = 0x1.000006p-126
+ comp32(single_of_bits(0x80400001) * single_of_bits(0x40000001), 0x80800003, STR(__LINE__)); // -0x1.000004p-127 * 0x1.000002p+1 = -0x1.000006p-126
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x80400001), 0x80800003, STR(__LINE__)); // 0x1.000002p+1 * -0x1.000004p-127 = -0x1.000006p-126
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x00400001), 0x80800003, STR(__LINE__)); // -0x1.000002p+1 * 0x1.000004p-127 = -0x1.000006p-126
+ comp32(single_of_bits(0x00400001) * single_of_bits(0xc0000001), 0x80800003, STR(__LINE__)); // 0x1.000004p-127 * -0x1.000002p+1 = -0x1.000006p-126
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0x40400000), 0x410fffff, STR(__LINE__)); // 0x1.7ffffep+1 * 0x1.8p+1 = 0x1.1ffffep+3
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x403fffff), 0x410fffff, STR(__LINE__)); // 0x1.8p+1 * 0x1.7ffffep+1 = 0x1.1ffffep+3
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0xc0400000), 0x410fffff, STR(__LINE__)); // -0x1.7ffffep+1 * -0x1.8p+1 = 0x1.1ffffep+3
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0xc03fffff), 0x410fffff, STR(__LINE__)); // -0x1.8p+1 * -0x1.7ffffep+1 = 0x1.1ffffep+3
+}
+
+void f529(void) {
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0x40400000), 0xc10fffff, STR(__LINE__)); // -0x1.7ffffep+1 * 0x1.8p+1 = -0x1.1ffffep+3
+ comp32(single_of_bits(0x40400000) * single_of_bits(0xc03fffff), 0xc10fffff, STR(__LINE__)); // 0x1.8p+1 * -0x1.7ffffep+1 = -0x1.1ffffep+3
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x403fffff), 0xc10fffff, STR(__LINE__)); // -0x1.8p+1 * 0x1.7ffffep+1 = -0x1.1ffffep+3
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0xc0400000), 0xc10fffff, STR(__LINE__)); // 0x1.7ffffep+1 * -0x1.8p+1 = -0x1.1ffffep+3
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0x7e000001), 0x7f200001, STR(__LINE__)); // 0x1.4p+2 * 0x1.000002p+125 = 0x1.400002p+127
+ comp32(single_of_bits(0x7e000001) * single_of_bits(0x40a00000), 0x7f200001, STR(__LINE__)); // 0x1.000002p+125 * 0x1.4p+2 = 0x1.400002p+127
+ comp32(single_of_bits(0xc0a00000) * single_of_bits(0xfe000001), 0x7f200001, STR(__LINE__)); // -0x1.4p+2 * -0x1.000002p+125 = 0x1.400002p+127
+ comp32(single_of_bits(0xfe000001) * single_of_bits(0xc0a00000), 0x7f200001, STR(__LINE__)); // -0x1.000002p+125 * -0x1.4p+2 = 0x1.400002p+127
+ comp32(single_of_bits(0xc0a00000) * single_of_bits(0x7e000001), 0xff200001, STR(__LINE__)); // -0x1.4p+2 * 0x1.000002p+125 = -0x1.400002p+127
+ comp32(single_of_bits(0x7e000001) * single_of_bits(0xc0a00000), 0xff200001, STR(__LINE__)); // 0x1.000002p+125 * -0x1.4p+2 = -0x1.400002p+127
+}
+
+void f530(void) {
+ comp32(single_of_bits(0xfe000001) * single_of_bits(0x40a00000), 0xff200001, STR(__LINE__)); // -0x1.000002p+125 * 0x1.4p+2 = -0x1.400002p+127
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0xfe000001), 0xff200001, STR(__LINE__)); // 0x1.4p+2 * -0x1.000002p+125 = -0x1.400002p+127
+ comp32(single_of_bits(0x01200000) * single_of_bits(0x40000001), 0x01a00001, STR(__LINE__)); // 0x1.4p-125 * 0x1.000002p+1 = 0x1.400002p-124
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x01200000), 0x01a00001, STR(__LINE__)); // 0x1.000002p+1 * 0x1.4p-125 = 0x1.400002p-124
+ comp32(single_of_bits(0x81200000) * single_of_bits(0xc0000001), 0x01a00001, STR(__LINE__)); // -0x1.4p-125 * -0x1.000002p+1 = 0x1.400002p-124
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x81200000), 0x01a00001, STR(__LINE__)); // -0x1.000002p+1 * -0x1.4p-125 = 0x1.400002p-124
+ comp32(single_of_bits(0x81200000) * single_of_bits(0x40000001), 0x81a00001, STR(__LINE__)); // -0x1.4p-125 * 0x1.000002p+1 = -0x1.400002p-124
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x81200000), 0x81a00001, STR(__LINE__)); // 0x1.000002p+1 * -0x1.4p-125 = -0x1.400002p-124
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x01200000), 0x81a00001, STR(__LINE__)); // -0x1.000002p+1 * 0x1.4p-125 = -0x1.400002p-124
+ comp32(single_of_bits(0x01200000) * single_of_bits(0xc0000001), 0x81a00001, STR(__LINE__)); // 0x1.4p-125 * -0x1.000002p+1 = -0x1.400002p-124
+}
+
+void f531(void) {
+ comp32(single_of_bits(0x40a00001) * single_of_bits(0x3f800001), 0x40a00002, STR(__LINE__)); // 0x1.400002p+2 * 0x1.000002p+0 = 0x1.400004p+2
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x40a00001), 0x40a00002, STR(__LINE__)); // 0x1.000002p+0 * 0x1.400002p+2 = 0x1.400004p+2
+ comp32(single_of_bits(0xc0a00001) * single_of_bits(0xbf800001), 0x40a00002, STR(__LINE__)); // -0x1.400002p+2 * -0x1.000002p+0 = 0x1.400004p+2
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xc0a00001), 0x40a00002, STR(__LINE__)); // -0x1.000002p+0 * -0x1.400002p+2 = 0x1.400004p+2
+ comp32(single_of_bits(0xc0a00001) * single_of_bits(0x3f800001), 0xc0a00002, STR(__LINE__)); // -0x1.400002p+2 * 0x1.000002p+0 = -0x1.400004p+2
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xc0a00001), 0xc0a00002, STR(__LINE__)); // 0x1.000002p+0 * -0x1.400002p+2 = -0x1.400004p+2
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x40a00001), 0xc0a00002, STR(__LINE__)); // -0x1.000002p+0 * 0x1.400002p+2 = -0x1.400004p+2
+ comp32(single_of_bits(0x40a00001) * single_of_bits(0xbf800001), 0xc0a00002, STR(__LINE__)); // 0x1.400002p+2 * -0x1.000002p+0 = -0x1.400004p+2
+ comp32(single_of_bits(0x40dfffff) * single_of_bits(0x3f7ffffe), 0x40dffffd, STR(__LINE__)); // 0x1.bffffep+2 * 0x1.fffffcp-1 = 0x1.bffffap+2
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x40dfffff), 0x40dffffd, STR(__LINE__)); // 0x1.fffffcp-1 * 0x1.bffffep+2 = 0x1.bffffap+2
+}
+
+void f532(void) {
+ comp32(single_of_bits(0xc0dfffff) * single_of_bits(0xbf7ffffe), 0x40dffffd, STR(__LINE__)); // -0x1.bffffep+2 * -0x1.fffffcp-1 = 0x1.bffffap+2
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0xc0dfffff), 0x40dffffd, STR(__LINE__)); // -0x1.fffffcp-1 * -0x1.bffffep+2 = 0x1.bffffap+2
+ comp32(single_of_bits(0xc0dfffff) * single_of_bits(0x3f7ffffe), 0xc0dffffd, STR(__LINE__)); // -0x1.bffffep+2 * 0x1.fffffcp-1 = -0x1.bffffap+2
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0xc0dfffff), 0xc0dffffd, STR(__LINE__)); // 0x1.fffffcp-1 * -0x1.bffffep+2 = -0x1.bffffap+2
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x40dfffff), 0xc0dffffd, STR(__LINE__)); // -0x1.fffffcp-1 * 0x1.bffffep+2 = -0x1.bffffap+2
+ comp32(single_of_bits(0x40dfffff) * single_of_bits(0xbf7ffffe), 0xc0dffffd, STR(__LINE__)); // 0x1.bffffep+2 * -0x1.fffffcp-1 = -0x1.bffffap+2
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0x3f7ffffe), 0x3f7ffffd, STR(__LINE__)); // 0x1.fffffep-1 * 0x1.fffffcp-1 = 0x1.fffffap-1
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x3f7fffff), 0x3f7ffffd, STR(__LINE__)); // 0x1.fffffcp-1 * 0x1.fffffep-1 = 0x1.fffffap-1
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0xbf7ffffe), 0x3f7ffffd, STR(__LINE__)); // -0x1.fffffep-1 * -0x1.fffffcp-1 = 0x1.fffffap-1
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0xbf7fffff), 0x3f7ffffd, STR(__LINE__)); // -0x1.fffffcp-1 * -0x1.fffffep-1 = 0x1.fffffap-1
+}
+
+void f533(void) {
+ comp32(single_of_bits(0xbf7fffff) * single_of_bits(0x3f7ffffe), 0xbf7ffffd, STR(__LINE__)); // -0x1.fffffep-1 * 0x1.fffffcp-1 = -0x1.fffffap-1
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0xbf7fffff), 0xbf7ffffd, STR(__LINE__)); // 0x1.fffffcp-1 * -0x1.fffffep-1 = -0x1.fffffap-1
+ comp32(single_of_bits(0x3f7fffff) * single_of_bits(0xbf7ffffe), 0xbf7ffffd, STR(__LINE__)); // 0x1.fffffep-1 * -0x1.fffffcp-1 = -0x1.fffffap-1
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x3f7fffff), 0xbf7ffffd, STR(__LINE__)); // -0x1.fffffcp-1 * 0x1.fffffep-1 = -0x1.fffffap-1
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0x3f800001), 0x7f000000, STR(__LINE__)); // 0x1.fffffep+126 * 0x1.000002p+0 = 0x1p+127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x7effffff), 0x7f000000, STR(__LINE__)); // 0x1.000002p+0 * 0x1.fffffep+126 = 0x1p+127
+ comp32(single_of_bits(0xfeffffff) * single_of_bits(0xbf800001), 0x7f000000, STR(__LINE__)); // -0x1.fffffep+126 * -0x1.000002p+0 = 0x1p+127
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xfeffffff), 0x7f000000, STR(__LINE__)); // -0x1.000002p+0 * -0x1.fffffep+126 = 0x1p+127
+ comp32(single_of_bits(0xfeffffff) * single_of_bits(0x3f800001), 0xff000000, STR(__LINE__)); // -0x1.fffffep+126 * 0x1.000002p+0 = -0x1p+127
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xfeffffff), 0xff000000, STR(__LINE__)); // 0x1.000002p+0 * -0x1.fffffep+126 = -0x1p+127
+}
+
+void f534(void) {
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x7effffff), 0xff000000, STR(__LINE__)); // -0x1.000002p+0 * 0x1.fffffep+126 = -0x1p+127
+ comp32(single_of_bits(0x7effffff) * single_of_bits(0xbf800001), 0xff000000, STR(__LINE__)); // 0x1.fffffep+126 * -0x1.000002p+0 = -0x1p+127
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0x40000001), 0x7f000000, STR(__LINE__)); // 0x1.fffffep+125 * 0x1.000002p+1 = 0x1p+127
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x7e7fffff), 0x7f000000, STR(__LINE__)); // 0x1.000002p+1 * 0x1.fffffep+125 = 0x1p+127
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0xc0000001), 0x7f000000, STR(__LINE__)); // -0x1.fffffep+125 * -0x1.000002p+1 = 0x1p+127
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0xfe7fffff), 0x7f000000, STR(__LINE__)); // -0x1.000002p+1 * -0x1.fffffep+125 = 0x1p+127
+ comp32(single_of_bits(0xfe7fffff) * single_of_bits(0x40000001), 0xff000000, STR(__LINE__)); // -0x1.fffffep+125 * 0x1.000002p+1 = -0x1p+127
+ comp32(single_of_bits(0x40000001) * single_of_bits(0xfe7fffff), 0xff000000, STR(__LINE__)); // 0x1.000002p+1 * -0x1.fffffep+125 = -0x1p+127
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x7e7fffff), 0xff000000, STR(__LINE__)); // -0x1.000002p+1 * 0x1.fffffep+125 = -0x1p+127
+ comp32(single_of_bits(0x7e7fffff) * single_of_bits(0xc0000001), 0xff000000, STR(__LINE__)); // 0x1.fffffep+125 * -0x1.000002p+1 = -0x1p+127
+}
+
+void f535(void) {
+ comp32(single_of_bits(0x01200001) * single_of_bits(0x40000001), 0x01a00002, STR(__LINE__)); // 0x1.400002p-125 * 0x1.000002p+1 = 0x1.400004p-124
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x01200001), 0x01a00002, STR(__LINE__)); // 0x1.000002p+1 * 0x1.400002p-125 = 0x1.400004p-124
+ comp32(single_of_bits(0x81200001) * single_of_bits(0xc0000001), 0x01a00002, STR(__LINE__)); // -0x1.400002p-125 * -0x1.000002p+1 = 0x1.400004p-124
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x81200001), 0x01a00002, STR(__LINE__)); // -0x1.000002p+1 * -0x1.400002p-125 = 0x1.400004p-124
+ comp32(single_of_bits(0x81200001) * single_of_bits(0x40000001), 0x81a00002, STR(__LINE__)); // -0x1.400002p-125 * 0x1.000002p+1 = -0x1.400004p-124
+ comp32(single_of_bits(0x40000001) * single_of_bits(0x81200001), 0x81a00002, STR(__LINE__)); // 0x1.000002p+1 * -0x1.400002p-125 = -0x1.400004p-124
+ comp32(single_of_bits(0xc0000001) * single_of_bits(0x01200001), 0x81a00002, STR(__LINE__)); // -0x1.000002p+1 * 0x1.400002p-125 = -0x1.400004p-124
+ comp32(single_of_bits(0x01200001) * single_of_bits(0xc0000001), 0x81a00002, STR(__LINE__)); // 0x1.400002p-125 * -0x1.000002p+1 = -0x1.400004p-124
+ comp32(single_of_bits(0x40400004) * single_of_bits(0x40e00000), 0x41a80004, STR(__LINE__)); // 0x1.800008p+1 * 0x1.cp+2 = 0x1.500008p+4
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0x40400004), 0x41a80004, STR(__LINE__)); // 0x1.cp+2 * 0x1.800008p+1 = 0x1.500008p+4
+}
+
+void f536(void) {
+ comp32(single_of_bits(0xc0400004) * single_of_bits(0xc0e00000), 0x41a80004, STR(__LINE__)); // -0x1.800008p+1 * -0x1.cp+2 = 0x1.500008p+4
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0xc0400004), 0x41a80004, STR(__LINE__)); // -0x1.cp+2 * -0x1.800008p+1 = 0x1.500008p+4
+ comp32(single_of_bits(0xc0400004) * single_of_bits(0x40e00000), 0xc1a80004, STR(__LINE__)); // -0x1.800008p+1 * 0x1.cp+2 = -0x1.500008p+4
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0xc0400004), 0xc1a80004, STR(__LINE__)); // 0x1.cp+2 * -0x1.800008p+1 = -0x1.500008p+4
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0x40400004), 0xc1a80004, STR(__LINE__)); // -0x1.cp+2 * 0x1.800008p+1 = -0x1.500008p+4
+ comp32(single_of_bits(0x40400004) * single_of_bits(0xc0e00000), 0xc1a80004, STR(__LINE__)); // 0x1.800008p+1 * -0x1.cp+2 = -0x1.500008p+4
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x3f800001), 0x40400002, STR(__LINE__)); // 0x1.8p+1 * 0x1.000002p+0 = 0x1.800004p+1
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x40400000), 0x40400002, STR(__LINE__)); // 0x1.000002p+0 * 0x1.8p+1 = 0x1.800004p+1
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0xbf800001), 0x40400002, STR(__LINE__)); // -0x1.8p+1 * -0x1.000002p+0 = 0x1.800004p+1
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xc0400000), 0x40400002, STR(__LINE__)); // -0x1.000002p+0 * -0x1.8p+1 = 0x1.800004p+1
+}
+
+void f537(void) {
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x3f800001), 0xc0400002, STR(__LINE__)); // -0x1.8p+1 * 0x1.000002p+0 = -0x1.800004p+1
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xc0400000), 0xc0400002, STR(__LINE__)); // 0x1.000002p+0 * -0x1.8p+1 = -0x1.800004p+1
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x40400000), 0xc0400002, STR(__LINE__)); // -0x1.000002p+0 * 0x1.8p+1 = -0x1.800004p+1
+ comp32(single_of_bits(0x40400000) * single_of_bits(0xbf800001), 0xc0400002, STR(__LINE__)); // 0x1.8p+1 * -0x1.000002p+0 = -0x1.800004p+1
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00800003), 0x00400002, STR(__LINE__)); // 0x1p-1 * 0x1.000006p-126 = 0x1.000008p-127
+ comp32(single_of_bits(0x00800003) * single_of_bits(0x3f000000), 0x00400002, STR(__LINE__)); // 0x1.000006p-126 * 0x1p-1 = 0x1.000008p-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80800003), 0x00400002, STR(__LINE__)); // -0x1p-1 * -0x1.000006p-126 = 0x1.000008p-127
+ comp32(single_of_bits(0x80800003) * single_of_bits(0xbf000000), 0x00400002, STR(__LINE__)); // -0x1.000006p-126 * -0x1p-1 = 0x1.000008p-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00800003), 0x80400002, STR(__LINE__)); // -0x1p-1 * 0x1.000006p-126 = -0x1.000008p-127
+ comp32(single_of_bits(0x00800003) * single_of_bits(0xbf000000), 0x80400002, STR(__LINE__)); // 0x1.000006p-126 * -0x1p-1 = -0x1.000008p-127
+}
+
+void f538(void) {
+ comp32(single_of_bits(0x80800003) * single_of_bits(0x3f000000), 0x80400002, STR(__LINE__)); // -0x1.000006p-126 * 0x1p-1 = -0x1.000008p-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80800003), 0x80400002, STR(__LINE__)); // 0x1p-1 * -0x1.000006p-126 = -0x1.000008p-127
+ comp32(single_of_bits(0x4040000c) * single_of_bits(0x40e00000), 0x41a8000a, STR(__LINE__)); // 0x1.800018p+1 * 0x1.cp+2 = 0x1.500014p+4
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0x4040000c), 0x41a8000a, STR(__LINE__)); // 0x1.cp+2 * 0x1.800018p+1 = 0x1.500014p+4
+ comp32(single_of_bits(0xc040000c) * single_of_bits(0xc0e00000), 0x41a8000a, STR(__LINE__)); // -0x1.800018p+1 * -0x1.cp+2 = 0x1.500014p+4
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0xc040000c), 0x41a8000a, STR(__LINE__)); // -0x1.cp+2 * -0x1.800018p+1 = 0x1.500014p+4
+ comp32(single_of_bits(0xc040000c) * single_of_bits(0x40e00000), 0xc1a8000a, STR(__LINE__)); // -0x1.800018p+1 * 0x1.cp+2 = -0x1.500014p+4
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0xc040000c), 0xc1a8000a, STR(__LINE__)); // 0x1.cp+2 * -0x1.800018p+1 = -0x1.500014p+4
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0x4040000c), 0xc1a8000a, STR(__LINE__)); // -0x1.cp+2 * 0x1.800018p+1 = -0x1.500014p+4
+ comp32(single_of_bits(0x4040000c) * single_of_bits(0xc0e00000), 0xc1a8000a, STR(__LINE__)); // 0x1.800018p+1 * -0x1.cp+2 = -0x1.500014p+4
+}
+
+void f539(void) {
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x3f800003), 0x40400004, STR(__LINE__)); // 0x1.8p+1 * 0x1.000006p+0 = 0x1.800008p+1
+ comp32(single_of_bits(0x3f800003) * single_of_bits(0x40400000), 0x40400004, STR(__LINE__)); // 0x1.000006p+0 * 0x1.8p+1 = 0x1.800008p+1
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0xbf800003), 0x40400004, STR(__LINE__)); // -0x1.8p+1 * -0x1.000006p+0 = 0x1.800008p+1
+ comp32(single_of_bits(0xbf800003) * single_of_bits(0xc0400000), 0x40400004, STR(__LINE__)); // -0x1.000006p+0 * -0x1.8p+1 = 0x1.800008p+1
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x3f800003), 0xc0400004, STR(__LINE__)); // -0x1.8p+1 * 0x1.000006p+0 = -0x1.800008p+1
+ comp32(single_of_bits(0x3f800003) * single_of_bits(0xc0400000), 0xc0400004, STR(__LINE__)); // 0x1.000006p+0 * -0x1.8p+1 = -0x1.800008p+1
+ comp32(single_of_bits(0xbf800003) * single_of_bits(0x40400000), 0xc0400004, STR(__LINE__)); // -0x1.000006p+0 * 0x1.8p+1 = -0x1.800008p+1
+ comp32(single_of_bits(0x40400000) * single_of_bits(0xbf800003), 0xc0400004, STR(__LINE__)); // 0x1.8p+1 * -0x1.000006p+0 = -0x1.800008p+1
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x00800001), 0x00400000, STR(__LINE__)); // 0x1p-1 * 0x1.000002p-126 = 0x1p-127
+ comp32(single_of_bits(0x00800001) * single_of_bits(0x3f000000), 0x00400000, STR(__LINE__)); // 0x1.000002p-126 * 0x1p-1 = 0x1p-127
+}
+
+void f540(void) {
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x80800001), 0x00400000, STR(__LINE__)); // -0x1p-1 * -0x1.000002p-126 = 0x1p-127
+ comp32(single_of_bits(0x80800001) * single_of_bits(0xbf000000), 0x00400000, STR(__LINE__)); // -0x1.000002p-126 * -0x1p-1 = 0x1p-127
+ comp32(single_of_bits(0xbf000000) * single_of_bits(0x00800001), 0x80400000, STR(__LINE__)); // -0x1p-1 * 0x1.000002p-126 = -0x1p-127
+ comp32(single_of_bits(0x00800001) * single_of_bits(0xbf000000), 0x80400000, STR(__LINE__)); // 0x1.000002p-126 * -0x1p-1 = -0x1p-127
+ comp32(single_of_bits(0x80800001) * single_of_bits(0x3f000000), 0x80400000, STR(__LINE__)); // -0x1.000002p-126 * 0x1p-1 = -0x1p-127
+ comp32(single_of_bits(0x3f000000) * single_of_bits(0x80800001), 0x80400000, STR(__LINE__)); // 0x1p-1 * -0x1.000002p-126 = -0x1p-127
+ comp32(single_of_bits(0x3fa00000) * single_of_bits(0x3ffffffe), 0x401fffff, STR(__LINE__)); // 0x1.4p+0 * 0x1.fffffcp+0 = 0x1.3ffffep+1
+ comp32(single_of_bits(0x3ffffffe) * single_of_bits(0x3fa00000), 0x401fffff, STR(__LINE__)); // 0x1.fffffcp+0 * 0x1.4p+0 = 0x1.3ffffep+1
+ comp32(single_of_bits(0xbfa00000) * single_of_bits(0xbffffffe), 0x401fffff, STR(__LINE__)); // -0x1.4p+0 * -0x1.fffffcp+0 = 0x1.3ffffep+1
+ comp32(single_of_bits(0xbffffffe) * single_of_bits(0xbfa00000), 0x401fffff, STR(__LINE__)); // -0x1.fffffcp+0 * -0x1.4p+0 = 0x1.3ffffep+1
+}
+
+void f541(void) {
+ comp32(single_of_bits(0xbfa00000) * single_of_bits(0x3ffffffe), 0xc01fffff, STR(__LINE__)); // -0x1.4p+0 * 0x1.fffffcp+0 = -0x1.3ffffep+1
+ comp32(single_of_bits(0x3ffffffe) * single_of_bits(0xbfa00000), 0xc01fffff, STR(__LINE__)); // 0x1.fffffcp+0 * -0x1.4p+0 = -0x1.3ffffep+1
+ comp32(single_of_bits(0xbffffffe) * single_of_bits(0x3fa00000), 0xc01fffff, STR(__LINE__)); // -0x1.fffffcp+0 * 0x1.4p+0 = -0x1.3ffffep+1
+ comp32(single_of_bits(0x3fa00000) * single_of_bits(0xbffffffe), 0xc01fffff, STR(__LINE__)); // 0x1.4p+0 * -0x1.fffffcp+0 = -0x1.3ffffep+1
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0x3f800001), 0x40e00002, STR(__LINE__)); // 0x1.cp+2 * 0x1.000002p+0 = 0x1.c00004p+2
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x40e00000), 0x40e00002, STR(__LINE__)); // 0x1.000002p+0 * 0x1.cp+2 = 0x1.c00004p+2
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0xbf800001), 0x40e00002, STR(__LINE__)); // -0x1.cp+2 * -0x1.000002p+0 = 0x1.c00004p+2
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xc0e00000), 0x40e00002, STR(__LINE__)); // -0x1.000002p+0 * -0x1.cp+2 = 0x1.c00004p+2
+ comp32(single_of_bits(0xc0e00000) * single_of_bits(0x3f800001), 0xc0e00002, STR(__LINE__)); // -0x1.cp+2 * 0x1.000002p+0 = -0x1.c00004p+2
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xc0e00000), 0xc0e00002, STR(__LINE__)); // 0x1.000002p+0 * -0x1.cp+2 = -0x1.c00004p+2
+}
+
+void f542(void) {
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x40e00000), 0xc0e00002, STR(__LINE__)); // -0x1.000002p+0 * 0x1.cp+2 = -0x1.c00004p+2
+ comp32(single_of_bits(0x40e00000) * single_of_bits(0xbf800001), 0xc0e00002, STR(__LINE__)); // 0x1.cp+2 * -0x1.000002p+0 = -0x1.c00004p+2
+ comp32(single_of_bits(0x01600000) * single_of_bits(0x40800001), 0x02600002, STR(__LINE__)); // 0x1.cp-125 * 0x1.000002p+2 = 0x1.c00004p-123
+ comp32(single_of_bits(0x40800001) * single_of_bits(0x01600000), 0x02600002, STR(__LINE__)); // 0x1.000002p+2 * 0x1.cp-125 = 0x1.c00004p-123
+ comp32(single_of_bits(0x81600000) * single_of_bits(0xc0800001), 0x02600002, STR(__LINE__)); // -0x1.cp-125 * -0x1.000002p+2 = 0x1.c00004p-123
+ comp32(single_of_bits(0xc0800001) * single_of_bits(0x81600000), 0x02600002, STR(__LINE__)); // -0x1.000002p+2 * -0x1.cp-125 = 0x1.c00004p-123
+ comp32(single_of_bits(0x81600000) * single_of_bits(0x40800001), 0x82600002, STR(__LINE__)); // -0x1.cp-125 * 0x1.000002p+2 = -0x1.c00004p-123
+ comp32(single_of_bits(0x40800001) * single_of_bits(0x81600000), 0x82600002, STR(__LINE__)); // 0x1.000002p+2 * -0x1.cp-125 = -0x1.c00004p-123
+ comp32(single_of_bits(0xc0800001) * single_of_bits(0x01600000), 0x82600002, STR(__LINE__)); // -0x1.000002p+2 * 0x1.cp-125 = -0x1.c00004p-123
+ comp32(single_of_bits(0x01600000) * single_of_bits(0xc0800001), 0x82600002, STR(__LINE__)); // 0x1.cp-125 * -0x1.000002p+2 = -0x1.c00004p-123
+}
+
+void f543(void) {
+ comp32(single_of_bits(0x40400001) * single_of_bits(0x3f800001), 0x40400003, STR(__LINE__)); // 0x1.800002p+1 * 0x1.000002p+0 = 0x1.800006p+1
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x40400001), 0x40400003, STR(__LINE__)); // 0x1.000002p+0 * 0x1.800002p+1 = 0x1.800006p+1
+ comp32(single_of_bits(0xc0400001) * single_of_bits(0xbf800001), 0x40400003, STR(__LINE__)); // -0x1.800002p+1 * -0x1.000002p+0 = 0x1.800006p+1
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xc0400001), 0x40400003, STR(__LINE__)); // -0x1.000002p+0 * -0x1.800002p+1 = 0x1.800006p+1
+ comp32(single_of_bits(0xc0400001) * single_of_bits(0x3f800001), 0xc0400003, STR(__LINE__)); // -0x1.800002p+1 * 0x1.000002p+0 = -0x1.800006p+1
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xc0400001), 0xc0400003, STR(__LINE__)); // 0x1.000002p+0 * -0x1.800002p+1 = -0x1.800006p+1
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x40400001), 0xc0400003, STR(__LINE__)); // -0x1.000002p+0 * 0x1.800002p+1 = -0x1.800006p+1
+ comp32(single_of_bits(0x40400001) * single_of_bits(0xbf800001), 0xc0400003, STR(__LINE__)); // 0x1.800002p+1 * -0x1.000002p+0 = -0x1.800006p+1
+ comp32(single_of_bits(0x40400001) * single_of_bits(0x3f800003), 0x40400006, STR(__LINE__)); // 0x1.800002p+1 * 0x1.000006p+0 = 0x1.80000cp+1
+ comp32(single_of_bits(0x3f800003) * single_of_bits(0x40400001), 0x40400006, STR(__LINE__)); // 0x1.000006p+0 * 0x1.800002p+1 = 0x1.80000cp+1
+}
+
+void f544(void) {
+ comp32(single_of_bits(0xc0400001) * single_of_bits(0xbf800003), 0x40400006, STR(__LINE__)); // -0x1.800002p+1 * -0x1.000006p+0 = 0x1.80000cp+1
+ comp32(single_of_bits(0xbf800003) * single_of_bits(0xc0400001), 0x40400006, STR(__LINE__)); // -0x1.000006p+0 * -0x1.800002p+1 = 0x1.80000cp+1
+ comp32(single_of_bits(0xc0400001) * single_of_bits(0x3f800003), 0xc0400006, STR(__LINE__)); // -0x1.800002p+1 * 0x1.000006p+0 = -0x1.80000cp+1
+ comp32(single_of_bits(0x3f800003) * single_of_bits(0xc0400001), 0xc0400006, STR(__LINE__)); // 0x1.000006p+0 * -0x1.800002p+1 = -0x1.80000cp+1
+ comp32(single_of_bits(0xbf800003) * single_of_bits(0x40400001), 0xc0400006, STR(__LINE__)); // -0x1.000006p+0 * 0x1.800002p+1 = -0x1.80000cp+1
+ comp32(single_of_bits(0x40400001) * single_of_bits(0xbf800003), 0xc0400006, STR(__LINE__)); // 0x1.800002p+1 * -0x1.000006p+0 = -0x1.80000cp+1
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0x3f7ffffe), 0x403ffffe, STR(__LINE__)); // 0x1.7ffffep+1 * 0x1.fffffcp-1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x403fffff), 0x403ffffe, STR(__LINE__)); // 0x1.fffffcp-1 * 0x1.7ffffep+1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0xbf7ffffe), 0x403ffffe, STR(__LINE__)); // -0x1.7ffffep+1 * -0x1.fffffcp-1 = 0x1.7ffffcp+1
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0xc03fffff), 0x403ffffe, STR(__LINE__)); // -0x1.fffffcp-1 * -0x1.7ffffep+1 = 0x1.7ffffcp+1
+}
+
+void f545(void) {
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0x3f7ffffe), 0xc03ffffe, STR(__LINE__)); // -0x1.7ffffep+1 * 0x1.fffffcp-1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0xc03fffff), 0xc03ffffe, STR(__LINE__)); // 0x1.fffffcp-1 * -0x1.7ffffep+1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x403fffff), 0xc03ffffe, STR(__LINE__)); // -0x1.fffffcp-1 * 0x1.7ffffep+1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0xbf7ffffe), 0xc03ffffe, STR(__LINE__)); // 0x1.7ffffep+1 * -0x1.fffffcp-1 = -0x1.7ffffcp+1
+ comp32(single_of_bits(0x40dfffff) * single_of_bits(0x3f7ffffc), 0x40dffffc, STR(__LINE__)); // 0x1.bffffep+2 * 0x1.fffff8p-1 = 0x1.bffff8p+2
+ comp32(single_of_bits(0x3f7ffffc) * single_of_bits(0x40dfffff), 0x40dffffc, STR(__LINE__)); // 0x1.fffff8p-1 * 0x1.bffffep+2 = 0x1.bffff8p+2
+ comp32(single_of_bits(0xc0dfffff) * single_of_bits(0xbf7ffffc), 0x40dffffc, STR(__LINE__)); // -0x1.bffffep+2 * -0x1.fffff8p-1 = 0x1.bffff8p+2
+ comp32(single_of_bits(0xbf7ffffc) * single_of_bits(0xc0dfffff), 0x40dffffc, STR(__LINE__)); // -0x1.fffff8p-1 * -0x1.bffffep+2 = 0x1.bffff8p+2
+ comp32(single_of_bits(0xc0dfffff) * single_of_bits(0x3f7ffffc), 0xc0dffffc, STR(__LINE__)); // -0x1.bffffep+2 * 0x1.fffff8p-1 = -0x1.bffff8p+2
+ comp32(single_of_bits(0x3f7ffffc) * single_of_bits(0xc0dfffff), 0xc0dffffc, STR(__LINE__)); // 0x1.fffff8p-1 * -0x1.bffffep+2 = -0x1.bffff8p+2
+}
+
+void f546(void) {
+ comp32(single_of_bits(0xbf7ffffc) * single_of_bits(0x40dfffff), 0xc0dffffc, STR(__LINE__)); // -0x1.fffff8p-1 * 0x1.bffffep+2 = -0x1.bffff8p+2
+ comp32(single_of_bits(0x40dfffff) * single_of_bits(0xbf7ffffc), 0xc0dffffc, STR(__LINE__)); // 0x1.bffffep+2 * -0x1.fffff8p-1 = -0x1.bffff8p+2
+ comp32(single_of_bits(0x00c00001) * single_of_bits(0x40800001), 0x01c00003, STR(__LINE__)); // 0x1.800002p-126 * 0x1.000002p+2 = 0x1.800006p-124
+ comp32(single_of_bits(0x40800001) * single_of_bits(0x00c00001), 0x01c00003, STR(__LINE__)); // 0x1.000002p+2 * 0x1.800002p-126 = 0x1.800006p-124
+ comp32(single_of_bits(0x80c00001) * single_of_bits(0xc0800001), 0x01c00003, STR(__LINE__)); // -0x1.800002p-126 * -0x1.000002p+2 = 0x1.800006p-124
+ comp32(single_of_bits(0xc0800001) * single_of_bits(0x80c00001), 0x01c00003, STR(__LINE__)); // -0x1.000002p+2 * -0x1.800002p-126 = 0x1.800006p-124
+ comp32(single_of_bits(0x80c00001) * single_of_bits(0x40800001), 0x81c00003, STR(__LINE__)); // -0x1.800002p-126 * 0x1.000002p+2 = -0x1.800006p-124
+ comp32(single_of_bits(0x40800001) * single_of_bits(0x80c00001), 0x81c00003, STR(__LINE__)); // 0x1.000002p+2 * -0x1.800002p-126 = -0x1.800006p-124
+ comp32(single_of_bits(0xc0800001) * single_of_bits(0x00c00001), 0x81c00003, STR(__LINE__)); // -0x1.000002p+2 * 0x1.800002p-126 = -0x1.800006p-124
+ comp32(single_of_bits(0x00c00001) * single_of_bits(0xc0800001), 0x81c00003, STR(__LINE__)); // 0x1.800002p-126 * -0x1.000002p+2 = -0x1.800006p-124
+}
+
+void f547(void) {
+ comp32(single_of_bits(0x40e00001) * single_of_bits(0x3f800001), 0x40e00003, STR(__LINE__)); // 0x1.c00002p+2 * 0x1.000002p+0 = 0x1.c00006p+2
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x40e00001), 0x40e00003, STR(__LINE__)); // 0x1.000002p+0 * 0x1.c00002p+2 = 0x1.c00006p+2
+ comp32(single_of_bits(0xc0e00001) * single_of_bits(0xbf800001), 0x40e00003, STR(__LINE__)); // -0x1.c00002p+2 * -0x1.000002p+0 = 0x1.c00006p+2
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xc0e00001), 0x40e00003, STR(__LINE__)); // -0x1.000002p+0 * -0x1.c00002p+2 = 0x1.c00006p+2
+ comp32(single_of_bits(0xc0e00001) * single_of_bits(0x3f800001), 0xc0e00003, STR(__LINE__)); // -0x1.c00002p+2 * 0x1.000002p+0 = -0x1.c00006p+2
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xc0e00001), 0xc0e00003, STR(__LINE__)); // 0x1.000002p+0 * -0x1.c00002p+2 = -0x1.c00006p+2
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x40e00001), 0xc0e00003, STR(__LINE__)); // -0x1.000002p+0 * 0x1.c00002p+2 = -0x1.c00006p+2
+ comp32(single_of_bits(0x40e00001) * single_of_bits(0xbf800001), 0xc0e00003, STR(__LINE__)); // 0x1.c00002p+2 * -0x1.000002p+0 = -0x1.c00006p+2
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0x3f800001), 0x3f800000, STR(__LINE__)); // 0x1.fffffcp-1 * 0x1.000002p+0 = 0x1p+0
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0x3f7ffffe), 0x3f800000, STR(__LINE__)); // 0x1.000002p+0 * 0x1.fffffcp-1 = 0x1p+0
+}
+
+void f548(void) {
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0xbf800001), 0x3f800000, STR(__LINE__)); // -0x1.fffffcp-1 * -0x1.000002p+0 = 0x1p+0
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0xbf7ffffe), 0x3f800000, STR(__LINE__)); // -0x1.000002p+0 * -0x1.fffffcp-1 = 0x1p+0
+ comp32(single_of_bits(0xbf7ffffe) * single_of_bits(0x3f800001), 0xbf800000, STR(__LINE__)); // -0x1.fffffcp-1 * 0x1.000002p+0 = -0x1p+0
+ comp32(single_of_bits(0x3f800001) * single_of_bits(0xbf7ffffe), 0xbf800000, STR(__LINE__)); // 0x1.000002p+0 * -0x1.fffffcp-1 = -0x1p+0
+ comp32(single_of_bits(0xbf800001) * single_of_bits(0x3f7ffffe), 0xbf800000, STR(__LINE__)); // -0x1.000002p+0 * 0x1.fffffcp-1 = -0x1p+0
+ comp32(single_of_bits(0x3f7ffffe) * single_of_bits(0xbf800001), 0xbf800000, STR(__LINE__)); // 0x1.fffffcp-1 * -0x1.000002p+0 = -0x1p+0
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0x3f7ffffd), 0x403ffffd, STR(__LINE__)); // 0x1.7ffffep+1 * 0x1.fffffap-1 = 0x1.7ffffap+1
+ comp32(single_of_bits(0x3f7ffffd) * single_of_bits(0x403fffff), 0x403ffffd, STR(__LINE__)); // 0x1.fffffap-1 * 0x1.7ffffep+1 = 0x1.7ffffap+1
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0xbf7ffffd), 0x403ffffd, STR(__LINE__)); // -0x1.7ffffep+1 * -0x1.fffffap-1 = 0x1.7ffffap+1
+ comp32(single_of_bits(0xbf7ffffd) * single_of_bits(0xc03fffff), 0x403ffffd, STR(__LINE__)); // -0x1.fffffap-1 * -0x1.7ffffep+1 = 0x1.7ffffap+1
+}
+
+void f549(void) {
+ comp32(single_of_bits(0xc03fffff) * single_of_bits(0x3f7ffffd), 0xc03ffffd, STR(__LINE__)); // -0x1.7ffffep+1 * 0x1.fffffap-1 = -0x1.7ffffap+1
+ comp32(single_of_bits(0x3f7ffffd) * single_of_bits(0xc03fffff), 0xc03ffffd, STR(__LINE__)); // 0x1.fffffap-1 * -0x1.7ffffep+1 = -0x1.7ffffap+1
+ comp32(single_of_bits(0xbf7ffffd) * single_of_bits(0x403fffff), 0xc03ffffd, STR(__LINE__)); // -0x1.fffffap-1 * 0x1.7ffffep+1 = -0x1.7ffffap+1
+ comp32(single_of_bits(0x403fffff) * single_of_bits(0xbf7ffffd), 0xc03ffffd, STR(__LINE__)); // 0x1.7ffffep+1 * -0x1.fffffap-1 = -0x1.7ffffap+1
+ comp32(single_of_bits(0x01600001) * single_of_bits(0x40800001), 0x02600003, STR(__LINE__)); // 0x1.c00002p-125 * 0x1.000002p+2 = 0x1.c00006p-123
+ comp32(single_of_bits(0x40800001) * single_of_bits(0x01600001), 0x02600003, STR(__LINE__)); // 0x1.000002p+2 * 0x1.c00002p-125 = 0x1.c00006p-123
+ comp32(single_of_bits(0x81600001) * single_of_bits(0xc0800001), 0x02600003, STR(__LINE__)); // -0x1.c00002p-125 * -0x1.000002p+2 = 0x1.c00006p-123
+ comp32(single_of_bits(0xc0800001) * single_of_bits(0x81600001), 0x02600003, STR(__LINE__)); // -0x1.000002p+2 * -0x1.c00002p-125 = 0x1.c00006p-123
+ comp32(single_of_bits(0x81600001) * single_of_bits(0x40800001), 0x82600003, STR(__LINE__)); // -0x1.c00002p-125 * 0x1.000002p+2 = -0x1.c00006p-123
+ comp32(single_of_bits(0x40800001) * single_of_bits(0x81600001), 0x82600003, STR(__LINE__)); // 0x1.000002p+2 * -0x1.c00002p-125 = -0x1.c00006p-123
+}
+
+void f550(void) {
+ comp32(single_of_bits(0xc0800001) * single_of_bits(0x01600001), 0x82600003, STR(__LINE__)); // -0x1.000002p+2 * 0x1.c00002p-125 = -0x1.c00006p-123
+ comp32(single_of_bits(0x01600001) * single_of_bits(0xc0800001), 0x82600003, STR(__LINE__)); // 0x1.c00002p-125 * -0x1.000002p+2 = -0x1.c00006p-123
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0x40000000), 0x00fffffe, STR(__LINE__)); // 0x1.fffffcp-127 * 0x1p+1 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x007fffff), 0x00fffffe, STR(__LINE__)); // 0x1p+1 * 0x1.fffffcp-127 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0xc0000000), 0x00fffffe, STR(__LINE__)); // -0x1.fffffcp-127 * -0x1p+1 = 0x1.fffffcp-126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x807fffff), 0x00fffffe, STR(__LINE__)); // -0x1p+1 * -0x1.fffffcp-127 = 0x1.fffffcp-126
+ comp32(single_of_bits(0x807fffff) * single_of_bits(0x40000000), 0x80fffffe, STR(__LINE__)); // -0x1.fffffcp-127 * 0x1p+1 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x807fffff), 0x80fffffe, STR(__LINE__)); // 0x1p+1 * -0x1.fffffcp-127 = -0x1.fffffcp-126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x007fffff), 0x80fffffe, STR(__LINE__)); // -0x1p+1 * 0x1.fffffcp-127 = -0x1.fffffcp-126
+ comp32(single_of_bits(0x007fffff) * single_of_bits(0xc0000000), 0x80fffffe, STR(__LINE__)); // 0x1.fffffcp-127 * -0x1p+1 = -0x1.fffffcp-126
+}
+
+void f551(void) {
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x007ffffd), 0x80fffffa, STR(__LINE__)); // -0x1p+1 * 0x1.fffff4p-127 = -0x1.fffff4p-126
+ comp32(single_of_bits(0x007ffffd) * single_of_bits(0xc0000000), 0x80fffffa, STR(__LINE__)); // 0x1.fffff4p-127 * -0x1p+1 = -0x1.fffff4p-126
+ comp32(single_of_bits(0x807ffffd) * single_of_bits(0xc0000000), 0x00fffffa, STR(__LINE__)); // -0x1.fffff4p-127 * -0x1p+1 = 0x1.fffff4p-126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x807ffffd), 0x00fffffa, STR(__LINE__)); // -0x1p+1 * -0x1.fffff4p-127 = 0x1.fffff4p-126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x807ffffd), 0x80fffffa, STR(__LINE__)); // 0x1p+1 * -0x1.fffff4p-127 = -0x1.fffff4p-126
+ comp32(single_of_bits(0x807ffffd) * single_of_bits(0x40000000), 0x80fffffa, STR(__LINE__)); // -0x1.fffff4p-127 * 0x1p+1 = -0x1.fffff4p-126
+ comp32(single_of_bits(0x007ffffd) * single_of_bits(0x40000000), 0x00fffffa, STR(__LINE__)); // 0x1.fffff4p-127 * 0x1p+1 = 0x1.fffff4p-126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x007ffffd), 0x00fffffa, STR(__LINE__)); // 0x1p+1 * 0x1.fffff4p-127 = 0x1.fffff4p-126
+ comp32(single_of_bits(0x007ffffc) * single_of_bits(0x40000000), 0x00fffff8, STR(__LINE__)); // 0x1.fffffp-127 * 0x1p+1 = 0x1.fffffp-126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x007ffffc), 0x00fffff8, STR(__LINE__)); // 0x1p+1 * 0x1.fffffp-127 = 0x1.fffffp-126
+}
+
+void f552(void) {
+ comp32(single_of_bits(0x807ffffc) * single_of_bits(0xc0000000), 0x00fffff8, STR(__LINE__)); // -0x1.fffffp-127 * -0x1p+1 = 0x1.fffffp-126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x807ffffc), 0x00fffff8, STR(__LINE__)); // -0x1p+1 * -0x1.fffffp-127 = 0x1.fffffp-126
+ comp32(single_of_bits(0x007ffffc) * single_of_bits(0xc0000000), 0x80fffff8, STR(__LINE__)); // 0x1.fffffp-127 * -0x1p+1 = -0x1.fffffp-126
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x007ffffc), 0x80fffff8, STR(__LINE__)); // -0x1p+1 * 0x1.fffffp-127 = -0x1.fffffp-126
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x807ffffc), 0x80fffff8, STR(__LINE__)); // 0x1p+1 * -0x1.fffffp-127 = -0x1.fffffp-126
+ comp32(single_of_bits(0x807ffffc) * single_of_bits(0x40000000), 0x80fffff8, STR(__LINE__)); // -0x1.fffffp-127 * 0x1p+1 = -0x1.fffffp-126
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40000000), 0x00000002, STR(__LINE__)); // 0x1p-149 * 0x1p+1 = 0x1p-148
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00000001), 0x00000002, STR(__LINE__)); // 0x1p+1 * 0x1p-149 = 0x1p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0000000), 0x00000002, STR(__LINE__)); // -0x1p-149 * -0x1p+1 = 0x1p-148
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80000001), 0x00000002, STR(__LINE__)); // -0x1p+1 * -0x1p-149 = 0x1p-148
+}
+
+void f553(void) {
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0000000), 0x80000002, STR(__LINE__)); // 0x1p-149 * -0x1p+1 = -0x1p-148
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00000001), 0x80000002, STR(__LINE__)); // -0x1p+1 * 0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80000001), 0x80000002, STR(__LINE__)); // 0x1p+1 * -0x1p-149 = -0x1p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40000000), 0x80000002, STR(__LINE__)); // -0x1p-149 * 0x1p+1 = -0x1p-148
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x40000000), 0x00000004, STR(__LINE__)); // 0x1p-148 * 0x1p+1 = 0x1p-147
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00000002), 0x00000004, STR(__LINE__)); // 0x1p+1 * 0x1p-148 = 0x1p-147
+ comp32(single_of_bits(0x80000002) * single_of_bits(0xc0000000), 0x00000004, STR(__LINE__)); // -0x1p-148 * -0x1p+1 = 0x1p-147
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80000002), 0x00000004, STR(__LINE__)); // -0x1p+1 * -0x1p-148 = 0x1p-147
+ comp32(single_of_bits(0x00000002) * single_of_bits(0xc0000000), 0x80000004, STR(__LINE__)); // 0x1p-148 * -0x1p+1 = -0x1p-147
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00000002), 0x80000004, STR(__LINE__)); // -0x1p+1 * 0x1p-148 = -0x1p-147
+}
+
+void f554(void) {
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80000002), 0x80000004, STR(__LINE__)); // 0x1p+1 * -0x1p-148 = -0x1p-147
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x40000000), 0x80000004, STR(__LINE__)); // -0x1p-148 * 0x1p+1 = -0x1p-147
+ comp32(single_of_bits(0x00000003) * single_of_bits(0x40000000), 0x00000006, STR(__LINE__)); // 0x1.8p-148 * 0x1p+1 = 0x1.8p-147
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x00000003), 0x00000006, STR(__LINE__)); // 0x1p+1 * 0x1.8p-148 = 0x1.8p-147
+ comp32(single_of_bits(0x80000003) * single_of_bits(0xc0000000), 0x00000006, STR(__LINE__)); // -0x1.8p-148 * -0x1p+1 = 0x1.8p-147
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x80000003), 0x00000006, STR(__LINE__)); // -0x1p+1 * -0x1.8p-148 = 0x1.8p-147
+ comp32(single_of_bits(0x00000003) * single_of_bits(0xc0000000), 0x80000006, STR(__LINE__)); // 0x1.8p-148 * -0x1p+1 = -0x1.8p-147
+ comp32(single_of_bits(0xc0000000) * single_of_bits(0x00000003), 0x80000006, STR(__LINE__)); // -0x1p+1 * 0x1.8p-148 = -0x1.8p-147
+ comp32(single_of_bits(0x40000000) * single_of_bits(0x80000003), 0x80000006, STR(__LINE__)); // 0x1p+1 * -0x1.8p-148 = -0x1.8p-147
+ comp32(single_of_bits(0x80000003) * single_of_bits(0x40000000), 0x80000006, STR(__LINE__)); // -0x1.8p-148 * 0x1p+1 = -0x1.8p-147
+}
+
+void f555(void) {
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x00000001), 0x00000003, STR(__LINE__)); // 0x1.8p+1 * 0x1p-149 = 0x1.8p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40400000), 0x00000003, STR(__LINE__)); // 0x1p-149 * 0x1.8p+1 = 0x1.8p-148
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x80000001), 0x00000003, STR(__LINE__)); // -0x1.8p+1 * -0x1p-149 = 0x1.8p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0400000), 0x00000003, STR(__LINE__)); // -0x1p-149 * -0x1.8p+1 = 0x1.8p-148
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x80000001), 0x80000003, STR(__LINE__)); // 0x1.8p+1 * -0x1p-149 = -0x1.8p-148
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40400000), 0x80000003, STR(__LINE__)); // -0x1p-149 * 0x1.8p+1 = -0x1.8p-148
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0400000), 0x80000003, STR(__LINE__)); // 0x1p-149 * -0x1.8p+1 = -0x1.8p-148
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x00000001), 0x80000003, STR(__LINE__)); // -0x1.8p+1 * 0x1p-149 = -0x1.8p-148
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x00000002), 0x00000006, STR(__LINE__)); // 0x1.8p+1 * 0x1p-148 = 0x1.8p-147
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x40400000), 0x00000006, STR(__LINE__)); // 0x1p-148 * 0x1.8p+1 = 0x1.8p-147
+}
+
+void f556(void) {
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x80000002), 0x00000006, STR(__LINE__)); // -0x1.8p+1 * -0x1p-148 = 0x1.8p-147
+ comp32(single_of_bits(0x80000002) * single_of_bits(0xc0400000), 0x00000006, STR(__LINE__)); // -0x1p-148 * -0x1.8p+1 = 0x1.8p-147
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x80000002), 0x80000006, STR(__LINE__)); // 0x1.8p+1 * -0x1p-148 = -0x1.8p-147
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x40400000), 0x80000006, STR(__LINE__)); // -0x1p-148 * 0x1.8p+1 = -0x1.8p-147
+ comp32(single_of_bits(0x00000002) * single_of_bits(0xc0400000), 0x80000006, STR(__LINE__)); // 0x1p-148 * -0x1.8p+1 = -0x1.8p-147
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x00000002), 0x80000006, STR(__LINE__)); // -0x1.8p+1 * 0x1p-148 = -0x1.8p-147
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x00000003), 0x00000009, STR(__LINE__)); // 0x1.8p+1 * 0x1.8p-148 = 0x1.2p-146
+ comp32(single_of_bits(0x00000003) * single_of_bits(0x40400000), 0x00000009, STR(__LINE__)); // 0x1.8p-148 * 0x1.8p+1 = 0x1.2p-146
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x80000003), 0x00000009, STR(__LINE__)); // -0x1.8p+1 * -0x1.8p-148 = 0x1.2p-146
+ comp32(single_of_bits(0x80000003) * single_of_bits(0xc0400000), 0x00000009, STR(__LINE__)); // -0x1.8p-148 * -0x1.8p+1 = 0x1.2p-146
+}
+
+void f557(void) {
+ comp32(single_of_bits(0x40400000) * single_of_bits(0x80000003), 0x80000009, STR(__LINE__)); // 0x1.8p+1 * -0x1.8p-148 = -0x1.2p-146
+ comp32(single_of_bits(0x80000003) * single_of_bits(0x40400000), 0x80000009, STR(__LINE__)); // -0x1.8p-148 * 0x1.8p+1 = -0x1.2p-146
+ comp32(single_of_bits(0x00000003) * single_of_bits(0xc0400000), 0x80000009, STR(__LINE__)); // 0x1.8p-148 * -0x1.8p+1 = -0x1.2p-146
+ comp32(single_of_bits(0xc0400000) * single_of_bits(0x00000003), 0x80000009, STR(__LINE__)); // -0x1.8p+1 * 0x1.8p-148 = -0x1.2p-146
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x00000001), 0x00000004, STR(__LINE__)); // 0x1p+2 * 0x1p-149 = 0x1p-147
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40800000), 0x00000004, STR(__LINE__)); // 0x1p-149 * 0x1p+2 = 0x1p-147
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x80000001), 0x80000004, STR(__LINE__)); // 0x1p+2 * -0x1p-149 = -0x1p-147
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40800000), 0x80000004, STR(__LINE__)); // -0x1p-149 * 0x1p+2 = -0x1p-147
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x80000001), 0x00000004, STR(__LINE__)); // -0x1p+2 * -0x1p-149 = 0x1p-147
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0800000), 0x00000004, STR(__LINE__)); // -0x1p-149 * -0x1p+2 = 0x1p-147
+}
+
+void f558(void) {
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x00000001), 0x80000004, STR(__LINE__)); // -0x1p+2 * 0x1p-149 = -0x1p-147
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0800000), 0x80000004, STR(__LINE__)); // 0x1p-149 * -0x1p+2 = -0x1p-147
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x00000002), 0x00000008, STR(__LINE__)); // 0x1p+2 * 0x1p-148 = 0x1p-146
+ comp32(single_of_bits(0x00000002) * single_of_bits(0x40800000), 0x00000008, STR(__LINE__)); // 0x1p-148 * 0x1p+2 = 0x1p-146
+ comp32(single_of_bits(0x40800000) * single_of_bits(0x80000002), 0x80000008, STR(__LINE__)); // 0x1p+2 * -0x1p-148 = -0x1p-146
+ comp32(single_of_bits(0x80000002) * single_of_bits(0x40800000), 0x80000008, STR(__LINE__)); // -0x1p-148 * 0x1p+2 = -0x1p-146
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x80000002), 0x00000008, STR(__LINE__)); // -0x1p+2 * -0x1p-148 = 0x1p-146
+ comp32(single_of_bits(0x80000002) * single_of_bits(0xc0800000), 0x00000008, STR(__LINE__)); // -0x1p-148 * -0x1p+2 = 0x1p-146
+ comp32(single_of_bits(0xc0800000) * single_of_bits(0x00000002), 0x80000008, STR(__LINE__)); // -0x1p+2 * 0x1p-148 = -0x1p-146
+ comp32(single_of_bits(0x00000002) * single_of_bits(0xc0800000), 0x80000008, STR(__LINE__)); // 0x1p-148 * -0x1p+2 = -0x1p-146
+}
+
+void f559(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xc0a00000), 0x00000005, STR(__LINE__)); // -0x1p-149 * -0x1.4p+2 = 0x1.4p-147
+ comp32(single_of_bits(0xc0a00000) * single_of_bits(0x80000001), 0x00000005, STR(__LINE__)); // -0x1.4p+2 * -0x1p-149 = 0x1.4p-147
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0x00000001), 0x00000005, STR(__LINE__)); // 0x1.4p+2 * 0x1p-149 = 0x1.4p-147
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x40a00000), 0x00000005, STR(__LINE__)); // 0x1p-149 * 0x1.4p+2 = 0x1.4p-147
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x40a00000), 0x80000005, STR(__LINE__)); // -0x1p-149 * 0x1.4p+2 = -0x1.4p-147
+ comp32(single_of_bits(0x40a00000) * single_of_bits(0x80000001), 0x80000005, STR(__LINE__)); // 0x1.4p+2 * -0x1p-149 = -0x1.4p-147
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xc0a00000), 0x80000005, STR(__LINE__)); // 0x1p-149 * -0x1.4p+2 = -0x1.4p-147
+ comp32(single_of_bits(0xc0a00000) * single_of_bits(0x00000001), 0x80000005, STR(__LINE__)); // -0x1.4p+2 * 0x1p-149 = -0x1.4p-147
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x4c000000), 0x01800000, STR(__LINE__)); // 0x1p-149 * 0x1p+25 = 0x1p-124
+ comp32(single_of_bits(0x4c000000) * single_of_bits(0x00000001), 0x01800000, STR(__LINE__)); // 0x1p+25 * 0x1p-149 = 0x1p-124
+}
+
+void f560(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xcc000000), 0x01800000, STR(__LINE__)); // -0x1p-149 * -0x1p+25 = 0x1p-124
+ comp32(single_of_bits(0xcc000000) * single_of_bits(0x80000001), 0x01800000, STR(__LINE__)); // -0x1p+25 * -0x1p-149 = 0x1p-124
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x4c000000), 0x81800000, STR(__LINE__)); // -0x1p-149 * 0x1p+25 = -0x1p-124
+ comp32(single_of_bits(0x4c000000) * single_of_bits(0x80000001), 0x81800000, STR(__LINE__)); // 0x1p+25 * -0x1p-149 = -0x1p-124
+ comp32(single_of_bits(0xcc000000) * single_of_bits(0x00000001), 0x81800000, STR(__LINE__)); // -0x1p+25 * 0x1p-149 = -0x1p-124
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xcc000000), 0x81800000, STR(__LINE__)); // 0x1p-149 * -0x1p+25 = -0x1p-124
+ comp32(single_of_bits(0x00000001) * single_of_bits(0x4b000000), 0x00800000, STR(__LINE__)); // 0x1p-149 * 0x1p+23 = 0x1p-126
+ comp32(single_of_bits(0x4b000000) * single_of_bits(0x00000001), 0x00800000, STR(__LINE__)); // 0x1p+23 * 0x1p-149 = 0x1p-126
+ comp32(single_of_bits(0x80000001) * single_of_bits(0xcb000000), 0x00800000, STR(__LINE__)); // -0x1p-149 * -0x1p+23 = 0x1p-126
+ comp32(single_of_bits(0xcb000000) * single_of_bits(0x80000001), 0x00800000, STR(__LINE__)); // -0x1p+23 * -0x1p-149 = 0x1p-126
+}
+
+void f561(void) {
+ comp32(single_of_bits(0x80000001) * single_of_bits(0x4b000000), 0x80800000, STR(__LINE__)); // -0x1p-149 * 0x1p+23 = -0x1p-126
+ comp32(single_of_bits(0x4b000000) * single_of_bits(0x80000001), 0x80800000, STR(__LINE__)); // 0x1p+23 * -0x1p-149 = -0x1p-126
+ comp32(single_of_bits(0xcb000000) * single_of_bits(0x00000001), 0x80800000, STR(__LINE__)); // -0x1p+23 * 0x1p-149 = -0x1p-126
+ comp32(single_of_bits(0x00000001) * single_of_bits(0xcb000000), 0x80800000, STR(__LINE__)); // 0x1p-149 * -0x1p+23 = -0x1p-126
+}
+
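+/* Test driver: runs every generated batch fN() in order.  comp32()
+   presumably re-interprets the computed single-precision result (first
+   argument) as its IEEE-754 bit pattern and compares it against the
+   expected pattern (second argument), using STR(__LINE__) to tag the
+   source line and incrementing num_errors on mismatch; the total is
+   reported once all batches have run.  (Sketch of the harness only;
+   comp32 and num_errors are defined earlier in this file.) */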
+int main(void) {
+ f0();
+ f1();
+ f2();
+ f3();
+ f4();
+ f5();
+ f6();
+ f7();
+ f8();
+ f9();
+ f10();
+ f11();
+ f12();
+ f13();
+ f14();
+ f15();
+ f16();
+ f17();
+ f18();
+ f19();
+ f20();
+ f21();
+ f22();
+ f23();
+ f24();
+ f25();
+ f26();
+ f27();
+ f28();
+ f29();
+ f30();
+ f31();
+ f32();
+ f33();
+ f34();
+ f35();
+ f36();
+ f37();
+ f38();
+ f39();
+ f40();
+ f41();
+ f42();
+ f43();
+ f44();
+ f45();
+ f46();
+ f47();
+ f48();
+ f49();
+ f50();
+ f51();
+ f52();
+ f53();
+ f54();
+ f55();
+ f56();
+ f57();
+ f58();
+ f59();
+ f60();
+ f61();
+ f62();
+ f63();
+ f64();
+ f65();
+ f66();
+ f67();
+ f68();
+ f69();
+ f70();
+ f71();
+ f72();
+ f73();
+ f74();
+ f75();
+ f76();
+ f77();
+ f78();
+ f79();
+ f80();
+ f81();
+ f82();
+ f83();
+ f84();
+ f85();
+ f86();
+ f87();
+ f88();
+ f89();
+ f90();
+ f91();
+ f92();
+ f93();
+ f94();
+ f95();
+ f96();
+ f97();
+ f98();
+ f99();
+ f100();
+ f101();
+ f102();
+ f103();
+ f104();
+ f105();
+ f106();
+ f107();
+ f108();
+ f109();
+ f110();
+ f111();
+ f112();
+ f113();
+ f114();
+ f115();
+ f116();
+ f117();
+ f118();
+ f119();
+ f120();
+ f121();
+ f122();
+ f123();
+ f124();
+ f125();
+ f126();
+ f127();
+ f128();
+ f129();
+ f130();
+ f131();
+ f132();
+ f133();
+ f134();
+ f135();
+ f136();
+ f137();
+ f138();
+ f139();
+ f140();
+ f141();
+ f142();
+ f143();
+ f144();
+ f145();
+ f146();
+ f147();
+ f148();
+ f149();
+ f150();
+ f151();
+ f152();
+ f153();
+ f154();
+ f155();
+ f156();
+ f157();
+ f158();
+ f159();
+ f160();
+ f161();
+ f162();
+ f163();
+ f164();
+ f165();
+ f166();
+ f167();
+ f168();
+ f169();
+ f170();
+ f171();
+ f172();
+ f173();
+ f174();
+ f175();
+ f176();
+ f177();
+ f178();
+ f179();
+ f180();
+ f181();
+ f182();
+ f183();
+ f184();
+ f185();
+ f186();
+ f187();
+ f188();
+ f189();
+ f190();
+ f191();
+ f192();
+ f193();
+ f194();
+ f195();
+ f196();
+ f197();
+ f198();
+ f199();
+ f200();
+ f201();
+ f202();
+ f203();
+ f204();
+ f205();
+ f206();
+ f207();
+ f208();
+ f209();
+ f210();
+ f211();
+ f212();
+ f213();
+ f214();
+ f215();
+ f216();
+ f217();
+ f218();
+ f219();
+ f220();
+ f221();
+ f222();
+ f223();
+ f224();
+ f225();
+ f226();
+ f227();
+ f228();
+ f229();
+ f230();
+ f231();
+ f232();
+ f233();
+ f234();
+ f235();
+ f236();
+ f237();
+ f238();
+ f239();
+ f240();
+ f241();
+ f242();
+ f243();
+ f244();
+ f245();
+ f246();
+ f247();
+ f248();
+ f249();
+ f250();
+ f251();
+ f252();
+ f253();
+ f254();
+ f255();
+ f256();
+ f257();
+ f258();
+ f259();
+ f260();
+ f261();
+ f262();
+ f263();
+ f264();
+ f265();
+ f266();
+ f267();
+ f268();
+ f269();
+ f270();
+ f271();
+ f272();
+ f273();
+ f274();
+ f275();
+ f276();
+ f277();
+ f278();
+ f279();
+ f280();
+ f281();
+ f282();
+ f283();
+ f284();
+ f285();
+ f286();
+ f287();
+ f288();
+ f289();
+ f290();
+ f291();
+ f292();
+ f293();
+ f294();
+ f295();
+ f296();
+ f297();
+ f298();
+ f299();
+ f300();
+ f301();
+ f302();
+ f303();
+ f304();
+ f305();
+ f306();
+ f307();
+ f308();
+ f309();
+ f310();
+ f311();
+ f312();
+ f313();
+ f314();
+ f315();
+ f316();
+ f317();
+ f318();
+ f319();
+ f320();
+ f321();
+ f322();
+ f323();
+ f324();
+ f325();
+ f326();
+ f327();
+ f328();
+ f329();
+ f330();
+ f331();
+ f332();
+ f333();
+ f334();
+ f335();
+ f336();
+ f337();
+ f338();
+ f339();
+ f340();
+ f341();
+ f342();
+ f343();
+ f344();
+ f345();
+ f346();
+ f347();
+ f348();
+ f349();
+ f350();
+ f351();
+ f352();
+ f353();
+ f354();
+ f355();
+ f356();
+ f357();
+ f358();
+ f359();
+ f360();
+ f361();
+ f362();
+ f363();
+ f364();
+ f365();
+ f366();
+ f367();
+ f368();
+ f369();
+ f370();
+ f371();
+ f372();
+ f373();
+ f374();
+ f375();
+ f376();
+ f377();
+ f378();
+ f379();
+ f380();
+ f381();
+ f382();
+ f383();
+ f384();
+ f385();
+ f386();
+ f387();
+ f388();
+ f389();
+ f390();
+ f391();
+ f392();
+ f393();
+ f394();
+ f395();
+ f396();
+ f397();
+ f398();
+ f399();
+ f400();
+ f401();
+ f402();
+ f403();
+ f404();
+ f405();
+ f406();
+ f407();
+ f408();
+ f409();
+ f410();
+ f411();
+ f412();
+ f413();
+ f414();
+ f415();
+ f416();
+ f417();
+ f418();
+ f419();
+ f420();
+ f421();
+ f422();
+ f423();
+ f424();
+ f425();
+ f426();
+ f427();
+ f428();
+ f429();
+ f430();
+ f431();
+ f432();
+ f433();
+ f434();
+ f435();
+ f436();
+ f437();
+ f438();
+ f439();
+ f440();
+ f441();
+ f442();
+ f443();
+ f444();
+ f445();
+ f446();
+ f447();
+ f448();
+ f449();
+ f450();
+ f451();
+ f452();
+ f453();
+ f454();
+ f455();
+ f456();
+ f457();
+ f458();
+ f459();
+ f460();
+ f461();
+ f462();
+ f463();
+ f464();
+ f465();
+ f466();
+ f467();
+ f468();
+ f469();
+ f470();
+ f471();
+ f472();
+ f473();
+ f474();
+ f475();
+ f476();
+ f477();
+ f478();
+ f479();
+ f480();
+ f481();
+ f482();
+ f483();
+ f484();
+ f485();
+ f486();
+ f487();
+ f488();
+ f489();
+ f490();
+ f491();
+ f492();
+ f493();
+ f494();
+ f495();
+ f496();
+ f497();
+ f498();
+ f499();
+ f500();
+ f501();
+ f502();
+ f503();
+ f504();
+ f505();
+ f506();
+ f507();
+ f508();
+ f509();
+ f510();
+ f511();
+ f512();
+ f513();
+ f514();
+ f515();
+ f516();
+ f517();
+ f518();
+ f519();
+ f520();
+ f521();
+ f522();
+ f523();
+ f524();
+ f525();
+ f526();
+ f527();
+ f528();
+ f529();
+ f530();
+ f531();
+ f532();
+ f533();
+ f534();
+ f535();
+ f536();
+ f537();
+ f538();
+ f539();
+ f540();
+ f541();
+ f542();
+ f543();
+ f544();
+ f545();
+ f546();
+ f547();
+ f548();
+ f549();
+ f550();
+ f551();
+ f552();
+ f553();
+ f554();
+ f555();
+ f556();
+ f557();
+ f558();
+ f559();
+ f560();
+ f561();
+ printf("%d error(s) detected.\n", num_errors);
+ return 0;
+}