From 3ef7797ef6fc605dfafb32523261fe1b023aeecb Mon Sep 17 00:00:00 2001 From: Samuel Mimram Date: Fri, 28 Apr 2006 14:59:16 +0000 Subject: Imported Upstream version 8.0pl3+8.1alpha --- doc/INSTALL | 65 + doc/LICENCE | 630 ++++++ doc/Makefile | 300 +++ doc/Makefile.rt | 43 + doc/README | 30 + doc/RecTutorial/RecTutorial.tex | 3606 +++++++++++++++++++++++++++++++++++ doc/RecTutorial/RecTutorial.v | 1229 ++++++++++++ doc/RecTutorial/coqartmacros.tex | 180 ++ doc/RecTutorial/manbiblio.bib | 875 +++++++++ doc/RecTutorial/morebib.bib | 55 + doc/RecTutorial/recmacros.tex | 75 + doc/common/macros.tex | 497 +++++ doc/common/title.tex | 86 + doc/faq/FAQ.tex | 2481 ++++++++++++++++++++++++ doc/faq/axioms.eps | 378 ++++ doc/faq/axioms.fig | 84 + doc/faq/axioms.png | Bin 0 -> 10075 bytes doc/faq/fk.bib | 2221 +++++++++++++++++++++ doc/faq/hevea.sty | 78 + doc/faq/interval_discr.v | 419 ++++ doc/refman/AddRefMan-pre.tex | 58 + doc/refman/Cases.tex | 698 +++++++ doc/refman/Coercion.tex | 541 ++++++ doc/refman/Extraction.tex | 664 +++++++ doc/refman/Helm.tex | 317 +++ doc/refman/Natural.tex | 425 +++++ doc/refman/Omega.tex | 226 +++ doc/refman/Polynom.tex | 504 +++++ doc/refman/Program.tex | 491 +++++ doc/refman/RefMan-add.tex | 54 + doc/refman/RefMan-cas.tex | 692 +++++++ doc/refman/RefMan-cic.tex | 1480 ++++++++++++++ doc/refman/RefMan-coi.tex | 406 ++++ doc/refman/RefMan-com.tex | 280 +++ doc/refman/RefMan-ext.tex | 1173 ++++++++++++ doc/refman/RefMan-gal.tex | 1451 ++++++++++++++ doc/refman/RefMan-ide.tex | 327 ++++ doc/refman/RefMan-ind.tex | 498 +++++ doc/refman/RefMan-int.tex | 147 ++ doc/refman/RefMan-lib.tex | 1102 +++++++++++ doc/refman/RefMan-ltac.tex | 1057 ++++++++++ doc/refman/RefMan-mod.tex | 396 ++++ doc/refman/RefMan-modr.tex | 586 ++++++ doc/refman/RefMan-oth.tex | 773 ++++++++ doc/refman/RefMan-pre.tex | 582 ++++++ doc/refman/RefMan-pro.tex | 389 ++++ doc/refman/RefMan-syn.tex | 1016 ++++++++++ doc/refman/RefMan-tac.tex | 3096 ++++++++++++++++++++++++++++++ 
doc/refman/RefMan-tacex.tex | 1208 ++++++++++++ doc/refman/RefMan-tus.tex | 2015 +++++++++++++++++++ doc/refman/RefMan-uti.tex | 276 +++ doc/refman/Reference-Manual.tex | 125 ++ doc/refman/Setoid.tex | 158 ++ doc/refman/biblio.bib | 1144 +++++++++++ doc/refman/coqdoc.tex | 476 +++++ doc/refman/coqide-queries.png | Bin 0 -> 27316 bytes doc/refman/coqide.png | Bin 0 -> 20953 bytes doc/refman/cover.html | 36 + doc/refman/headers.tex | 102 + doc/refman/hevea.sty | 78 + doc/refman/index.html | 29 + doc/rt/RefMan-cover.tex | 46 + doc/rt/Tutorial-cover.tex | 48 + doc/stdlib/Library.tex | 62 + doc/stdlib/index-list.html.template | 339 ++++ doc/stdlib/index-trailer.html | 2 + doc/stdlib/make-library-files | 36 + doc/stdlib/make-library-index | 35 + doc/tools/Translator.tex | 898 +++++++++ doc/tutorial/Tutorial.tex | 1584 +++++++++++++++ 70 files changed, 41458 insertions(+) create mode 100644 doc/INSTALL create mode 100644 doc/LICENCE create mode 100644 doc/Makefile create mode 100644 doc/Makefile.rt create mode 100755 doc/README create mode 100644 doc/RecTutorial/RecTutorial.tex create mode 100644 doc/RecTutorial/RecTutorial.v create mode 100644 doc/RecTutorial/coqartmacros.tex create mode 100644 doc/RecTutorial/manbiblio.bib create mode 100644 doc/RecTutorial/morebib.bib create mode 100644 doc/RecTutorial/recmacros.tex create mode 100755 doc/common/macros.tex create mode 100755 doc/common/title.tex create mode 100644 doc/faq/FAQ.tex create mode 100644 doc/faq/axioms.eps create mode 100644 doc/faq/axioms.fig create mode 100644 doc/faq/axioms.png create mode 100644 doc/faq/fk.bib create mode 100644 doc/faq/hevea.sty create mode 100644 doc/faq/interval_discr.v create mode 100644 doc/refman/AddRefMan-pre.tex create mode 100644 doc/refman/Cases.tex create mode 100644 doc/refman/Coercion.tex create mode 100644 doc/refman/Extraction.tex create mode 100644 doc/refman/Helm.tex create mode 100644 doc/refman/Natural.tex create mode 100644 doc/refman/Omega.tex create mode 100644 
doc/refman/Polynom.tex create mode 100644 doc/refman/Program.tex create mode 100644 doc/refman/RefMan-add.tex create mode 100644 doc/refman/RefMan-cas.tex create mode 100644 doc/refman/RefMan-cic.tex create mode 100644 doc/refman/RefMan-coi.tex create mode 100644 doc/refman/RefMan-com.tex create mode 100644 doc/refman/RefMan-ext.tex create mode 100644 doc/refman/RefMan-gal.tex create mode 100644 doc/refman/RefMan-ide.tex create mode 100644 doc/refman/RefMan-ind.tex create mode 100644 doc/refman/RefMan-int.tex create mode 100644 doc/refman/RefMan-lib.tex create mode 100644 doc/refman/RefMan-ltac.tex create mode 100644 doc/refman/RefMan-mod.tex create mode 100644 doc/refman/RefMan-modr.tex create mode 100644 doc/refman/RefMan-oth.tex create mode 100644 doc/refman/RefMan-pre.tex create mode 100644 doc/refman/RefMan-pro.tex create mode 100644 doc/refman/RefMan-syn.tex create mode 100644 doc/refman/RefMan-tac.tex create mode 100644 doc/refman/RefMan-tacex.tex create mode 100644 doc/refman/RefMan-tus.tex create mode 100644 doc/refman/RefMan-uti.tex create mode 100644 doc/refman/Reference-Manual.tex create mode 100644 doc/refman/Setoid.tex create mode 100644 doc/refman/biblio.bib create mode 100644 doc/refman/coqdoc.tex create mode 100644 doc/refman/coqide-queries.png create mode 100644 doc/refman/coqide.png create mode 100644 doc/refman/cover.html create mode 100644 doc/refman/headers.tex create mode 100644 doc/refman/hevea.sty create mode 100644 doc/refman/index.html create mode 100644 doc/rt/RefMan-cover.tex create mode 100644 doc/rt/Tutorial-cover.tex create mode 100755 doc/stdlib/Library.tex create mode 100644 doc/stdlib/index-list.html.template create mode 100644 doc/stdlib/index-trailer.html create mode 100755 doc/stdlib/make-library-files create mode 100755 doc/stdlib/make-library-index create mode 100644 doc/tools/Translator.tex create mode 100755 doc/tutorial/Tutorial.tex (limited to 'doc') diff --git a/doc/INSTALL b/doc/INSTALL new file mode 100644 index 
00000000..9223a41b --- /dev/null +++ b/doc/INSTALL @@ -0,0 +1,65 @@ + The Coq documentation + ===================== + +The Coq documentation includes + +- A Reference Manual +- A Tutorial +- A document presenting the Coq standard library +- A list of questions/answers in the FAQ style + +The sources of the documents are mainly made of LaTeX code from which +user-readable PostScript or PDF files, or a user-browsable bunch of +html files are generated. + +Prerequisite +------------ + +To produce the documents, you need the coqtop, coq-tex, coqdoc and +gallina tools, with same version number as the current +documentation. These four tools normally come with any basic Coq +installation. + +In addition, to produce the PostScript documents, the following tools +are needed: + + - latex (latex2e) + - dvips + - bibtex + - makeindex + - pngtopnm and pnmtops (for the Reference Manual and the FAQ) + +To produce the PDF documents, the following tools are needed: + + - pdflatex + - bibtex + +To produce the html documents, the following tools are needed: + + - hevea (e.g. 1.07 works) + +To produce the documentation of the standard library, a source copy of +the coq distribution is needed. 
+ +Compilation +----------- + +To produce all PostScript documents, do: make all-ps +To produce all PDF documents, do: make all-pdf +To produce all html documents, do: make all-html +To produce all formats of the Reference Manual, do: make refman +To produce all formats of the Tutorial, do: make tutorial +To produce all formats of the Coq Standard Library, do: make stdlib +To produce all formats of the FAQ, do: make faq + +Installation +------------ + +To install all produced documents, do: + + make DOCDIR=/some/directory/for/documentation install + +DOCDIR defauts to /usr/share/doc/coq-x.y were x.y is the version number + + + diff --git a/doc/LICENCE b/doc/LICENCE new file mode 100644 index 00000000..99087480 --- /dev/null +++ b/doc/LICENCE @@ -0,0 +1,630 @@ +The Coq Reference Manual is a collective work from the Coq Development +Team whose members are listed in the file CREDITS of the Coq source +package. All related documents (the LaTeX and BibTeX sources, the +embedded png files, and the PostScript, PDF and html outputs) are +copyright (c) INRIA 1999-2006. The material connected to the Reference +Manual may be distributed only subject to the terms and conditions set +forth in the Open Publication License, v1.0 or later (the latest +version is presently available at http://www.opencontent.org/openpub/). +Options A and B are *not* elected. + +The Coq Tutorial is a work by Gérard Huet, Gilles Kahn and Christine +Paulin-Mohring. All documents (the LaTeX source and the PostScript, +PDF and html outputs) are copyright (c) INRIA 1999-2006. The material +connected to the Coq Tutorial may be distributed only subject to the +terms and conditions set forth in the Open Publication License, v1.0 +or later (the latest version is presently available at +http://www.opencontent.org/openpub/). Options A and B are *not* +elected. + +The Coq Standard Library is a collective work from the Coq Development +Team whose members are listed in the file CREDITS of the Coq source +package. 
All related documents (the Coq vernacular source files and +the PostScript, PDF and html outputs) are copyright (c) INRIA +1999-2006. The material connected to the Standard Library is +distributed under the terms of the Lesser General Public License +version 2.1 or later. + +The FAQ (Coq for the Clueless) is a work by Pierre Castéran, Hugo +Herbelin, Florent Kirchner, Benjamin Monate, and Julien Narboux. All +documents (the LaTeX source and the PostScript, PDF and html outputs) +are copyright (c) INRIA 2004-2006. The material connected to the FAQ +(Coq for the Clueless) may be distributed only subject to the terms +and conditions set forth in the Open Publication License, v1.0 or +later (the latest version is presently available at +http://www.opencontent.org/openpub/). Options A and B are *not* +elected. + +The Tutorial on [Co-]Inductive Types in Coq is a work by Pierre +Castéran and Eduardo Gimenez. All related documents (the LaTeX and +BibTeX sources and the PostScript, PDF and html outputs) are copyright +(c) INRIA 1997-2006. The material connected to the Tutorial on +[Co-]Inductive Types in Coq may be distributed only subject to the +terms and conditions set forth in the Open Publication License, v1.0 +or later (the latest version is presently available at +http://www.opencontent.org/openpub/). Options A and B are +*not* elected. + +---------------------------------------------------------------------- + + *Open Publication License* + v1.0, 8 June 1999 + + +*I. REQUIREMENTS ON BOTH UNMODIFIED AND MODIFIED VERSIONS* + +The Open Publication works may be reproduced and distributed in whole or +in part, in any medium physical or electronic, provided that the terms +of this license are adhered to, and that this license or an +incorporation of it by reference (with any options elected by the +author(s) and/or publisher) is displayed in the reproduction. + +Proper form for an incorporation by reference is as follows: + + Copyright (c) by . 
This material + may be distributed only subject to the terms and conditions set + forth in the Open Publication License, vX.Y or later (the latest + version is presently available at http://www.opencontent.org/openpub/). + +The reference must be immediately followed with any options elected by +the author(s) and/or publisher of the document (see section VI). + +Commercial redistribution of Open Publication-licensed material is +permitted. + +Any publication in standard (paper) book form shall require the citation +of the original publisher and author. The publisher and author's names +shall appear on all outer surfaces of the book. On all outer surfaces of +the book the original publisher's name shall be as large as the title of +the work and cited as possessive with respect to the title. + + +*II. COPYRIGHT* + +The copyright to each Open Publication is owned by its author(s) or +designee. + + +*III. SCOPE OF LICENSE* + +The following license terms apply to all Open Publication works, unless +otherwise explicitly stated in the document. + +Mere aggregation of Open Publication works or a portion of an Open +Publication work with other works or programs on the same media shall +not cause this license to apply to those other works. The aggregate work +shall contain a notice specifying the inclusion of the Open Publication +material and appropriate copyright notice. + +SEVERABILITY. If any part of this license is found to be unenforceable +in any jurisdiction, the remaining portions of the license remain in force. + +NO WARRANTY. Open Publication works are licensed and provided "as is" +without warranty of any kind, express or implied, including, but not +limited to, the implied warranties of merchantability and fitness for a +particular purpose or a warranty of non-infringement. + + +*IV. 
REQUIREMENTS ON MODIFIED WORKS* + +All modified versions of documents covered by this license, including +translations, anthologies, compilations and partial documents, must meet +the following requirements: + + 1. The modified version must be labeled as such. + 2. The person making the modifications must be identified and the + modifications dated. + 3. Acknowledgement of the original author and publisher if applicable + must be retained according to normal academic citation practices. + 4. The location of the original unmodified document must be identified. + 5. The original author's (or authors') name(s) may not be used to + assert or imply endorsement of the resulting document without the + original author's (or authors') permission. + + +*V. GOOD-PRACTICE RECOMMENDATIONS * + +In addition to the requirements of this license, it is requested from +and strongly recommended of redistributors that: + + 1. If you are distributing Open Publication works on hardcopy or + CD-ROM, you provide email notification to the authors of your + intent to redistribute at least thirty days before your manuscript + or media freeze, to give the authors time to provide updated + documents. This notification should describe modifications, if + any, made to the document. + 2. All substantive modifications (including deletions) be either + clearly marked up in the document or else described in an + attachment to the document. + 3. Finally, while it is not mandatory under this license, it is + considered good form to offer a free copy of any hardcopy and + CD-ROM expression of an Open Publication-licensed work to its + author(s). + + +*VI. LICENSE OPTIONS* + +The author(s) and/or publisher of an Open Publication-licensed document +may elect certain options by appending language to the reference to or +copy of the license. These options are considered part of the license +instance and must be included with the license (or its incorporation by +reference) in derived works. + +A. 
To prohibit distribution of substantively modified versions without +the explicit permission of the author(s). "Substantive modification" is +defined as a change to the semantic content of the document, and +excludes mere changes in format or typographical corrections. + +To accomplish this, add the phrase `Distribution of substantively +modified versions of this document is prohibited without the explicit +permission of the copyright holder.' to the license reference or copy. + +B. To prohibit any publication of this work or derivative works in whole +or in part in standard (paper) book form for commercial purposes is +prohibited unless prior permission is obtained from the copyright holder. + +To accomplish this, add the phrase 'Distribution of the work or +derivative of the work in any standard (paper) book form is prohibited +unless prior permission is obtained from the copyright holder.' to the +license reference or copy. + +---------------------------------------------------------------------- + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. 
You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. 
+ + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. 
In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. 
A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. 
+ + END OF TERMS AND CONDITIONS diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 00000000..07b13039 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,300 @@ +# Makefile for the Coq documentation + +# COQTOP needs to be set to a coq source repository + +# To compile documentation, you need the following tools: +# Dvi: latex (latex2e), bibtex, makeindex +# Pdf: pdflatex +# Html: hevea (http://hevea.inria.fr) >= 1.05 + +include ../config/Makefile + + +LATEX=latex +BIBTEX=bibtex -min-crossrefs=10 +MAKEINDEX=makeindex +PDFLATEX=pdflatex + +HEVEALIB=/usr/local/lib/hevea:/usr/lib/hevea +TEXINPUTS=$(HEVEALIB):.: + +DOCCOQTOP=$(COQTOP)/bin/coqtop +COQTEX=$(COQTOP)/bin/coq-tex -n 72 -image $(DOCCOQTOP) -v -sl -small +COQDOC=$(COQTOP)/bin/coqdoc + +#VERSION=POSITIONNEZ-CETTE-VARIABLE + +###################################################################### +### General rules +###################################################################### + +all: all-html all-pdf all-ps + +all-html:\ + tutorial/Tutorial.v.html refman/html/index.html \ + faq/html/index.html stdlib/html/index.html RecTutorial/RecTutorial.v.html + +all-pdf:\ + tutorial/Tutorial.v.pdf refman/Reference-Manual.pdf \ + faq/FAQ.v.pdf stdlib/Library.pdf RecTutorial/RecTutorial.v.pdf + +all-ps:\ + tutorial/Tutorial.v.ps refman/Reference-Manual.ps \ + faq/FAQ.v.ps stdlib/Library.ps RecTutorial/RecTutorial.v.ps + +refman:\ + refman/html/index.html refman/Reference-Manual.ps refman/Reference-Manual.pdf + +tutorial:\ + tutorial/Tutorial.v.html tutorial/Tutorial.v.ps tutorial/Tutorial.v.pdf + +stdlib:\ + stdlib/html/index.html stdlib/Library.ps stdlib/Library.pdf + +faq:\ + faq/html/index.html faq/FAQ.v.ps faq/FAQ.v.pdf + +rectutorial:\ + RecTutorial/RecTutorial.v.html \ + RecTutorial/RecTutorial.v.ps RecTutorial/RecTutorial.v.pdf + +###################################################################### +### Implicit rules +###################################################################### + 
+.SUFFIXES: .dvi .tex .v.tex .ps .pdf .eps .png + +%.v.tex: %.tex + (cd `dirname $<`; $(COQTEX) `basename $<`) + +%.ps: %.dvi + (cd `dirname $<`; dvips -o `basename $@` `basename $<`) + +%.eps: %.png + pngtopnm $< | pnmtops -equalpixels -noturn -rle > $@ + +clean: + rm -f */*.dvi */*.aux */*.log */*.bbl */*.blg */*.toc \ + */*.idx */*~ */*.ilg */*.ind */*.dvi.gz */*.ps.gz */*.pdf.gz\ + */*.???idx */*.???ind */*.v.tex */*.atoc */*.lof\ + */*.hatoc */*.haux */*.hcomind */*.herrind */*.hidx */*.hind \ + */*.htacind */*.htoc */*.v.html + rm -f stdlib/index-list.html stdlib/index-body.html \ + stdlib/Library.coqdoc.tex stdlib/library.files \ + stdlib/library.files.ls + rm -f refman/euclid.ml{,i} refman/heapsort.ml{,i} + rm -f common/version.tex + rm -f refman/*.eps refman/Reference-Manual.html + +cleanall: clean + rm -f */*.ps */*.pdf + rm -rf refman/html stdlib/html faq/html tutorial/tutorial.v.html + +###################################################################### +# Common +###################################################################### + +COMMON=common/version.tex common/title.tex common/macros.tex + +### Version + +common/version.tex: Makefile + echo "\newcommand{\coqversion}{$(VERSION)}" > common/version.tex + +###################################################################### +# Reference Manual +###################################################################### + +REFMANCOQTEXFILES=\ + refman/RefMan-gal.v.tex refman/RefMan-ext.v.tex \ + refman/RefMan-mod.v.tex refman/RefMan-tac.v.tex \ + refman/RefMan-cic.v.tex refman/RefMan-lib.v.tex \ + refman/RefMan-tacex.v.tex refman/RefMan-syn.v.tex \ + refman/RefMan-oth.v.tex \ + refman/Cases.v.tex refman/Coercion.v.tex refman/Extraction.v.tex \ + refman/Program.v.tex refman/Omega.v.tex refman/Polynom.v.tex \ + refman/Setoid.v.tex refman/Helm.tex # refman/Natural.v.tex + +REFMANTEXFILES=\ + refman/headers.tex \ + refman/Reference-Manual.tex refman/RefMan-pre.tex \ + refman/RefMan-int.tex 
refman/RefMan-pro.tex \ + refman/RefMan-com.tex refman/RefMan-ltac.tex \ + refman/RefMan-uti.tex refman/RefMan-ide.tex \ + refman/RefMan-add.tex refman/RefMan-modr.tex \ + $(REFMANCOQTEXFILES) \ + +REFMANEPSFILES= refman/coqide.eps refman/coqide-queries.eps + +REFMANFILES=\ + $(REFMANTEXFILES) $(COMMON) $(REFMANEPSFILES) refman/biblio.bib + +REFMANPNGFILES=$(REFMANEPSFILES:.eps=.png) + +### Reference Manual (printable format) + +# The second LATEX compilation is necessary otherwise the pages of the index +# are not correct (don't know why...) - BB +refman/Reference-Manual.dvi: $(REFMANFILES) + (cd refman;\ + $(LATEX) Reference-Manual;\ + $(BIBTEX) Reference-Manual;\ + $(LATEX) Reference-Manual;\ + $(MAKEINDEX) Reference-Manual;\ + $(MAKEINDEX) Reference-Manual.tacidx -o Reference-Manual.tacind;\ + $(MAKEINDEX) Reference-Manual.comidx -o Reference-Manual.comind;\ + $(MAKEINDEX) Reference-Manual.erridx -o Reference-Manual.errind;\ + $(LATEX) Reference-Manual;\ + $(LATEX) Reference-Manual) + +refman/Reference-Manual.pdf: refman/Reference-Manual.tex + (cd refman; $(PDFLATEX) Reference-Manual.tex) + +### Reference Manual (browsable format) + +refman/Reference-Manual.html: refman/Reference-Manual.dvi # to ensure bbl file + (cd refman; hevea -fix -exec xxdate.exe ./Reference-Manual.tex) + +refman/html/index.html: refman/Reference-Manual.html $(REFMANPNGFILES) \ + refman/cover.html refman/index.html + - rm -rf refman/html + mkdir refman/html + cp $(REFMANPNGFILES) refman/html + (cd refman/html; hacha -o toc.html ../Reference-Manual.html) + cp refman/cover.html refman/html + cp refman/index.html refman/html + +###################################################################### +# Tutorial +###################################################################### + +tutorial/Tutorial.v.dvi: common/version.tex common/title.tex tutorial/Tutorial.v.tex + (cd tutorial; $(LATEX) Tutorial.v) + +tutorial/Tutorial.v.pdf: common/version.tex common/title.tex tutorial/Tutorial.v.dvi + 
(cd tutorial; $(PDFLATEX) Tutorial.v.tex) + +tutorial/Tutorial.v.html: tutorial/Tutorial.v.tex + (cd tutorial; hevea -exec xxdate.exe Tutorial.v) + + +###################################################################### +# FAQ +###################################################################### + +faq/FAQ.v.dvi: common/version.tex common/title.tex faq/FAQ.v.tex + (cd faq;\ + $(LATEX) FAQ.v;\ + $(BIBTEX) FAQ.v;\ + $(LATEX) FAQ.v;\ + $(LATEX) FAQ.v) + +faq/FAQ.v.pdf: common/version.tex common/title.tex faq/FAQ.v.dvi faq/axioms.png + (cd faq; $(PDFLATEX) FAQ.v.tex) + +faq/FAQ.v.html: faq/FAQ.v.dvi # to ensure FAQ.v.bbl + (cd faq; hevea -fix FAQ.v.tex) + +faq/html/index.html: faq/FAQ.v.html + - rm -rf faq/html + mkdir faq/html + cp faq/interval_discr.v faq/axioms.png faq/html + cp faq/FAQ.v.html faq/html/index.html + +###################################################################### +# Standard library +###################################################################### + +GLOBDUMP=$(COQTOP)/glob.dump + +LIBDIRS= Logic Bool Arith ZArith Reals Lists Sets Relations Sorting Wellfounded IntMap FSets + +### Standard library (browsable html format) + +stdlib/index-body.html: $(GLOBDUMP) + - rm -rf stdlib/html + mkdir stdlib/html + (cd stdlib/html;\ + $(COQDOC) -q --multi-index --html --glob-from $(GLOBDUMP)\ + -R $(COQTOP)/theories Coq $(COQTOP)/theories/*/*.v) + mv stdlib/html/index.html stdlib/index-body.html + +stdlib/index-list.html: stdlib/index-list.html.template + COQTOP=$(COQTOP) ./stdlib/make-library-index stdlib/index-list.html + +stdlib/html/index.html: stdlib/index-list.html stdlib/index-body.html stdlib/index-trailer.html + cat stdlib/index-list.html > $@ + sed -n -e '//,/<\/table>/p' stdlib/index-body.html >> $@ + cat stdlib/index-trailer.html >> $@ + +### Standard library (printable format - produces > 350 pages) + +stdlib/Library.coqdoc.tex: + (for dir in $(LIBDIRS) ; do \ + $(COQDOC) -q --gallina --body-only --latex --stdout \ + -R 
$(COQTOP)/theories Coq "$(COQTOP)/theories/$$dir/"*.v >> $@ ; done)
+
+stdlib/Library.dvi: $(COMMON) stdlib/Library.coqdoc.tex stdlib/Library.tex
+	(cd stdlib;\
+	$(LATEX) Library;\
+	$(LATEX) Library)
+
+stdlib/Library.pdf: $(COMMON) stdlib/Library.coqdoc.tex stdlib/Library.dvi
+	(cd stdlib; $(PDFLATEX) Library)
+
+######################################################################
+# Tutorial on inductive types
+######################################################################
+
+RecTutorial/RecTutorial.v.dvi: common/version.tex common/title.tex RecTutorial/RecTutorial.v.tex
+	(cd RecTutorial;\
+	$(LATEX) RecTutorial.v;\
+	$(BIBTEX) RecTutorial.v;\
+	$(LATEX) RecTutorial.v;\
+	$(LATEX) RecTutorial.v)
+
+RecTutorial/RecTutorial.v.pdf: common/version.tex common/title.tex RecTutorial/RecTutorial.v.dvi
+	(cd RecTutorial; $(PDFLATEX) RecTutorial.v.tex)
+
+RecTutorial/RecTutorial.v.html: RecTutorial/RecTutorial.v.tex
+	(cd RecTutorial; hevea -exec xxdate.exe RecTutorial.v)
+
+
+######################################################################
+# Install all documentation files
+######################################################################
+
+COQINSTALLPREFIX=
+DOCDIR=/usr/local/share/doc/coq-8.0
+FULLDOCDIR=$(COQINSTALLPREFIX)$(DOCDIR)
+HTMLINSTALLDIR=$(FULLDOCDIR)/html
+PRINTABLEINSTALLDIR=$(FULLDOCDIR)/ps
+
+install-doc: install-meta install-doc-html install-doc-printable
+
+install-meta:
+	mkdir $(DOCDIR)
+	cp LICENCE $(DOCDIR)/LICENCE.doc
+#	cp $(COQTOP)/LICENCE $(COQTOP)/CREDITS $(COQTOP)/COPYRIGHT $(DOCDIR)
+#	cp $(COQTOP)/README $(COQTOP)/CHANGES $(DOCDIR)
+
+install-doc-html: all-html
+	mkdir $(HTMLINSTALLDIR)
+	cp -r refman/html $(HTMLINSTALLDIR)/refman
+	cp -r stdlib/html $(HTMLINSTALLDIR)/stdlib
+	cp -r tutorial/Tutorial.v.html $(HTMLINSTALLDIR)/
+	cp -r RecTutorial/RecTutorial.v.html $(HTMLINSTALLDIR)/
+	cp -r faq/html $(HTMLINSTALLDIR)/faq
+
+install-doc-printable: all-pdf all-ps
+	mkdir $(PRINTABLEINSTALLDIR)
+	cp -r 
refman/Reference-Manual.pdf $(PRINTABLEINSTALLDIR)
+	cp -r stdlib/Library.pdf $(PRINTABLEINSTALLDIR)
+	cp -r tutorial/Tutorial.v.pdf $(PRINTABLEINSTALLDIR)/Tutorial.pdf
+	cp -r RecTutorial/RecTutorial.v.pdf $(PRINTABLEINSTALLDIR)/RecTutorial.pdf
+	cp -r faq/FAQ.v.pdf $(PRINTABLEINSTALLDIR)/FAQ.pdf
+	cp -r refman/Reference-Manual.ps $(PRINTABLEINSTALLDIR)
+	cp -r stdlib/Library.ps $(PRINTABLEINSTALLDIR)
+	cp -r tutorial/Tutorial.v.ps $(PRINTABLEINSTALLDIR)/Tutorial.ps
+	cp -r RecTutorial/RecTutorial.v.ps $(PRINTABLEINSTALLDIR)/RecTutorial.ps
+	cp -r faq/FAQ.v.ps $(PRINTABLEINSTALLDIR)/FAQ.ps
diff --git a/doc/Makefile.rt b/doc/Makefile.rt
new file mode 100644
index 00000000..6c328134
--- /dev/null
+++ b/doc/Makefile.rt
@@ -0,0 +1,43 @@
+# Makefile for building Coq Technical Reports
+
+# if coqc,coqtop,coq-tex are not in your PATH, you need the environment
+# variable COQBIN to be correctly set
+# (COQTOP is autodetected)
+# (some files are preprocessed using Coq and some part of the documentation
+# is automatically built from the theories sources)
+
+# To compile documentation, you need the following tools:
+# Dvi: latex (latex2e), bibtex, makeindex, dviselect (package RPM dviutils)
+# Ps: dvips, psutils (ftp://ftp.dcs.ed.ac.uk/pub/ajcd/psutils.tar.gz)
+# Pdf: pdflatex
+# Html:
+# - hevea: http://para.inria.fr/~maranget/hevea/
+# - htmlSplit: http://coq.inria.fr/~delahaye
+# Rapports INRIA: dviselect, rrkit (par Michel Mauny)
+
+include ./Makefile
+
+###################
+# RT
+###################
+# Fabrication d'un RT INRIA (utilise rrkit de Michel Mauny)
+rt/Reference-Manual-RT.dvi: refman/Reference-Manual.dvi rt/RefMan-cover.tex
+	dviselect -i refman/Reference-Manual.dvi -o rt/RefMan-body.dvi 3:
+	(cd rt; $(LATEX) RefMan-cover.tex)
+	set a=`tail -1 refman/Reference-Manual.log`;\
+	set a=expr \("$$a" : '.*(\(.*\) pages.*'\) % 2;\
+	(cd rt; if $(TEST) "$$a = 0";\
+	then rrkit RefMan-cover.dvi RefMan-body.dvi Reference-Manual-RT.dvi;\
+	else rrkit -odd 
RefMan-cover.dvi RefMan-body.dvi Reference-Manual-RT.dvi;\ + fi) + +# Fabrication d'un RT INRIA (utilise rrkit de Michel Mauny) +rt/Tutorial-RT.dvi : tutorial/Tutorial.v.dvi rt/Tutorial-cover.tex + dviselect -i rt/Tutorial.v.dvi -o rt/Tutorial-body.dvi 3: + (cd rt; $(LATEX) Tutorial-cover.tex) + set a=`tail -1 tutorial/Tutorial.v.log`;\ + set a=expr \("$$a" : '.*(\(.*\) pages.*'\) % 2;\ + (cd rt; if $(TEST) "$$a = 0";\ + then rrkit Tutorial-cover.dvi Tutorial-body.dvi Tutorial-RT.dvi;\ + else rrkit -odd Tutorial-cover.dvi Tutorial-body.dvi Tutorial-RT.dvi;\ + fi) diff --git a/doc/README b/doc/README new file mode 100755 index 00000000..14cb6e44 --- /dev/null +++ b/doc/README @@ -0,0 +1,30 @@ +You can get the whole documentation of Coq in the tar file all-ps-docs.tar. + +You can also get separately each document. The documentation of Coq +V8.0 is divided into the following documents : + + * Tutorial.ps: An introduction to the use of the Coq Proof Assistant; + + * Reference-Manual.ps: + + Base chapters: + - the description of Gallina, the language of Coq + - the description of the Vernacular, the commands of Coq + - the description of each tactic + - index on tactics, commands and error messages + + Additional chapters: + - the extended Cases (C.Cornes) + - the coercions (A. Saïbi) + - the tactic Omega (P. Crégut) + - the extraction features (J.-C. Filliâtre and P. Letouzey) + - the tactic Ring (S. Boutin and P. Loiseleur) + - the Setoid_replace tactic (C. Renard) + - etc. + + * Library.ps: A description of the Coq standard library; + + * rectypes.ps : A tutorial on recursive types by Eduardo Gimenez + +Documentation is also available in the PDF format and HTML format +(online at http://coq.inria.fr or by ftp in the file doc-html.tar.gz). 
diff --git a/doc/RecTutorial/RecTutorial.tex b/doc/RecTutorial/RecTutorial.tex new file mode 100644 index 00000000..9ee913d4 --- /dev/null +++ b/doc/RecTutorial/RecTutorial.tex @@ -0,0 +1,3606 @@ +\documentclass[11pt]{article} +\title{A Tutorial on [Co-]Inductive Types in Coq} +\author{Eduardo Gim\'enez\thanks{Eduardo.Gimenez@inria.fr}, +Pierre Cast\'eran\thanks{Pierre.Casteran@labri.fr}} +\date{May 1998 --- \today} + +\usepackage{multirow} +\usepackage{aeguill} +%\externaldocument{RefMan-gal.v} +%\externaldocument{RefMan-ext.v} +%\externaldocument{RefMan-tac.v} +%\externaldocument{RefMan-oth} +%\externaldocument{RefMan-tus.v} +%\externaldocument{RefMan-syn.v} +%\externaldocument{Extraction.v} +\input{recmacros} +\input{coqartmacros} +\newcommand{\refmancite}[1]{{}} +%\newcommand{\refmancite}[1]{\cite{coqrefman}} +%\newcommand{\refmancite}[1]{\cite[#1] {]{coqrefman}} + +\usepackage[latin1]{inputenc} +\usepackage[T1]{fontenc} +\usepackage{makeidx} +%\usepackage{multind} +\usepackage{alltt} +\usepackage{verbatim} +\usepackage{amssymb} +\usepackage{amsmath} +\usepackage{theorem} +\usepackage[dvips]{epsfig} +\usepackage{epic} +\usepackage{eepic} +\usepackage{ecltree} +\usepackage{moreverb} +\usepackage{color} +\usepackage{pifont} +\usepackage{xr} +\usepackage{url} + +\usepackage{alltt} +\renewcommand{\familydefault}{ptm} +\renewcommand{\seriesdefault}{m} +\renewcommand{\shapedefault}{n} +\newtheorem{exercise}{Exercise}[section] +\makeindex +\begin{document} +\maketitle + +\begin{abstract} +This document\footnote{The first versions of this document were entirely written by Eduardo Gimenez. +Pierre Cast\'eran wrote the 2004 revision.} is an introduction to the definition and +use of inductive and co-inductive types in the {\coq} proof environment. 
It explains how types like natural numbers and infinite streams are defined +in {\coq}, and the kind of proof techniques that can be used to reason +about them (case analysis, induction, inversion of predicates, +co-induction, etc). Each technique is illustrated through an +executable and self-contained {\coq} script. +\end{abstract} +%\RRkeyword{Proof environments, recursive types.} +%\makeRT + +\addtocontents{toc}{\protect \thispagestyle{empty}} +\pagenumbering{arabic} + +\cleardoublepage +\tableofcontents +\clearpage + +\section{About this document} + +This document is an introduction to the definition and use of +inductive and co-inductive types in the {\coq} proof environment. It was born from the +notes written for the course about the version V5.10 of {\coq}, given +by Eduardo Gimenez at +the Ecole Normale Sup\'erieure de Lyon in March 1996. This article is +a revised and improved version of these notes for the version V8.0 of +the system. + + +We assume that the reader has some familiarity with the +proofs-as-programs paradigm of Logic \cite{Coquand:metamathematical} and the generalities +of the {\coq} system \cite{coqrefman}. You would take a greater advantage of +this document if you first read the general tutorial about {\coq} and +{\coq}'s FAQ, both available on \cite{coqsite}. +A text book \cite{coqart}, accompanied with a lot of +examples and exercises \cite{Booksite}, presents a detailed description +of the {\coq} system and its underlying +formalism: the Calculus of Inductive Construction. +Finally, the complete description of {\coq} is given in the reference manual +\cite{coqrefman}. Most of the tactics and commands we describe have +several options, which we do not present exhaustively. +If some script herein uses a non described feature, please refer to +the Reference Manual. 
+
+
+If you are familiar with other proof environments
+based on type theory and the LCF style ---like PVS, LEGO, Isabelle,
+etc--- then you will find no difficulty in guessing the unexplained
+details.
+
+The best way to read this document is to start up the {\coq} system,
+type by yourself the examples and exercises, and observe the
+behavior of the system. All the examples proposed in this tutorial
+can be downloaded from the same site as the present document.
+
+
+The tutorial is organised as follows. The next section describes how
+inductive types are defined in {\coq}, and introduces some useful ones,
+like natural numbers, the empty type, the propositional equality type,
+and the logical connectives. Section \ref{CaseAnalysis} explains
+definitions by pattern-matching and their connection with the
+principle of case analysis. This principle is the most basic
+elimination rule associated with inductive or co-inductive types
+ and follows a
+general scheme that we illustrate for some of the types introduced in
+Section \ref{Introduction}. Section \ref{CaseTechniques} illustrates
+the pragmatics of this principle, showing different proof techniques
+based on it. Section \ref{StructuralInduction} introduces definitions
+by structural recursion and proofs by induction.
+Section~\ref{CaseStudy} presents some elaborate techniques
+about dependent case analysis. Finally, Section
+\ref{CoInduction} is a brief introduction to co-inductive types
+--i.e., types containing infinite objects-- and the principle of
+co-induction.
+
+Thanks to Bruno Barras, Yves Bertot, Hugo Herbelin, Jean-Fran\c{c}ois Monin
+and Michel L\'evy for their help.
+
+\subsection*{Lexical conventions}
+The \texttt{typewriter} font is used to represent text
+input by the user, while the \textit{italic} font is used to represent
+the text output by the system.
+ + +Moreover, the mathematical symbols \coqle{}, \coqdiff, \(\exists\), +\(\forall\), \arrow{}, $\rightarrow{}$ \coqor{}, \coqand{}, and \funarrow{} +stand for the character strings \citecoq{<=}, \citecoq{<>}, +\citecoq{exists}, \citecoq{forall}, \citecoq{->}, \citecoq{<-}, +\texttt{\char'134/}, \texttt{/\char'134}, and \citecoq{=>}, +respectively. For instance, the \coq{} statement +%V8 A prendre +% inclusion numero 1 +% traduction numero 1 +\begin{alltt} +\hide{Open Scope nat_scope. Check (}forall A:Set,(exists x : A, forall (y:A), x <> y) -> 2 = 3\hide{).} +\end{alltt} +is written as follows in this tutorial: +%V8 A prendre +% inclusion numero 2 +% traduction numero 2 +\begin{alltt} +\hide{Check (}{\prodsym}A:Set,(\exsym{}x:A, {\prodsym}y:A, x {\coqdiff} y) \arrow{} 2 = 3\hide{).} +\end{alltt} + +When a fragment of \coq{} input text appears in the middle of +regular text, we often place this fragment between double quotes +``\dots.'' These double quotes do not belong to the \coq{} syntax. + +Finally, any +string enclosed between \texttt{(*} and \texttt{*)} is a comment and +is ignored by the \coq{} system. + +\section{Introducing Inductive Types} +\label{Introduction} + +Inductive types are types closed with respect to their introduction +rules. These rules explain the most basic or \textsl{canonical} ways +of constructing an element of the type. In this sense, they +characterize the recursive type. Different rules must be considered as +introducing different objects. In order to fix ideas, let us introduce +in {\coq} the most well-known example of a recursive type: the type of +natural numbers. + +%V8 A prendre +\begin{alltt} +Inductive nat : Set := + | O : nat + | S : nat\arrow{}nat. +\end{alltt} + +The definition of a recursive type has two main parts. First, we +establish what kind of recursive type we will characterize (a set, in +this case). 
Second, we present the introduction rules that define the +type ({\Z} and {\SUCC}), also called its {\sl constructors}. The constructors +{\Z} and {\SUCC} determine all the elements of this type. In other +words, if $n\mbox{:}\nat$, then $n$ must have been introduced either +by the rule {\Z} or by an application of the rule {\SUCC} to a +previously constructed natural number. In this sense, we can say +that {\nat} is \emph{closed}. On the contrary, the type +$\Set$ is an {\it open} type, since we do not know {\it a priori} all +the possible ways of introducing an object of type \texttt{Set}. + +After entering this command, the constants {\nat}, {\Z} and {\SUCC} are +available in the current context. We can see their types using the +\texttt{Check} command \refmancite{Section \ref{Check}}: + +%V8 A prendre +\begin{alltt} +Check nat. +\it{}nat : Set +\tt{}Check O. +\it{}O : nat +\tt{}Check S. +\it{}S : nat {\arrow} nat +\end{alltt} + +Moreover, {\coq} adds to the context three constants named + $\natind$, $\natrec$ and $\natrect$, which + correspond to different principles of structural induction on +natural numbers that {\coq} infers automatically from the definition. We +will come back to them in Section \ref{StructuralInduction}. + + +In fact, the type of natural numbers as well as several useful +theorems about them are already defined in the basic library of {\coq}, +so there is no need to introduce them. Therefore, let us throw away +our (re)definition of {\nat}, using the command \texttt{Reset}. + +%V8 A prendre +\begin{alltt} +Reset nat. +Print nat. +\it{}Inductive nat : Set := O : nat | S : nat \arrow{} nat +For S: Argument scope is [nat_scope] +\end{alltt} + +Notice that \coq{}'s \emph{interpretation scope} for natural numbers +(called \texttt{nat\_scope}) +allows us to read and write natural numbers in decimal form (see \cite{coqrefman}). 
For instance, the constructor \texttt{O} can be read or written +as the digit $0$, and the term ``~\texttt{S (S (S O))}~'' as $3$. + +%V8 A prendre +\begin{alltt} +Check O. +\it 0 : nat. +\tt +Check (S (S (S O))). +\it 3 : nat +\end{alltt} + +Let us now take a look to some other +recursive types contained in the standard library of {\coq}. + +\subsection{Lists} +Lists are defined in library \citecoq{List}: + +\begin{alltt} +Require Import List. +Print list. +\it +Inductive list (A : Set) : Set := + nil : list A | cons : A {\arrow} list A {\arrow} list A +For nil: Argument A is implicit +For cons: Argument A is implicit +For list: Argument scope is [type_scope] +For nil: Argument scope is [type_scope] +For cons: Argument scopes are [type_scope _ _] +\end{alltt} + +In this definition, \citecoq{A} is a \emph{general parameter}, global +to both constructors. +This kind of definition allows us to build a whole family of +inductive types, indexed over the sort \citecoq{Set}. +This can be observed if we consider the type of identifiers +\citecoq{list}, \citecoq{cons} and \citecoq{nil}. +Notice the notation \citecoq{(A := \dots)} which must be used +when {\coq}'s type inference algorithm cannot infer the implicit +parameter \citecoq{A}. +\begin{alltt} +Check list. +\it list + : Set {\arrow} Set + +\tt Check (nil (A:=nat)). +\it nil + : list nat + +\tt Check (nil (A:= nat {\arrow} nat)). +\it nil + : list (nat {\arrow} nat) + +\tt Check (fun A: Set {\funarrow} (cons (A:=A))). +\it fun A : Set {\funarrow} cons (A:=A) + : {\prodsym} A : Set, A {\arrow} list A {\arrow} list A + +\tt Check (cons 3 (cons 2 nil)). +\it 3 :: 2 :: nil + : list nat +\end{alltt} + +\subsection{Vectors.} +\label{vectors} + +Like \texttt{list}, \citecoq{vector} is a polymorphic type: +if $A$ is a set, and $n$ a natural number, ``~\citecoq{vector $A$ $n$}~'' +is the type of vectors of elements of $A$ and size $n$. + + +\begin{alltt} +Require Import Bvector. + +Print vector. 
+\it +Inductive vector (A : Set) : nat {\arrow} Set := + Vnil : vector A 0 + | Vcons : A {\arrow} {\prodsym} n : nat, vector A n {\arrow} vector A (S n) +For vector: Argument scopes are [type_scope nat_scope] +For Vnil: Argument scope is [type_scope] +For Vcons: Argument scopes are [type_scope _ nat_scope _] +\end{alltt} + + +Remark the difference between the two parameters $A$ and $n$: +The first one is a \textsl{general parameter}, global to all the +introduction rules,while the second one is an \textsl{index}, which is +instantiated differently in the introduction rules. +Such types parameterized by regular +values are called \emph{dependent types}. + +\begin{alltt} +Check (Vnil nat). +\it Vnil nat + : vector nat 0 + +\tt Check (fun (A:Set)(a:A){\funarrow} Vcons _ a _ (Vnil _)). +\it fun (A : Set) (a : A) {\funarrow} Vcons A a 0 (Vnil A) + : {\prodsym} A : Set, A {\arrow} vector A 1 + + +\tt Check (Vcons _ 5 _ (Vcons _ 3 _ (Vnil _))). +\it Vcons nat 5 1 (Vcons nat 3 0 (Vnil nat)) + : vector nat 2 +\end{alltt} + +\subsection{The contradictory proposition.} +Another example of an inductive type is the contradictory proposition. +This type inhabits the universe of propositions, and has no element +at all. +%V8 A prendre +\begin{alltt} +Print False. +\it{} Inductive False : Prop := +\end{alltt} + +\noindent Notice that no constructor is given in this definition. + +\subsection{The tautological proposition.} +Similarly, the +tautological proposition {\True} is defined as an inductive type +with only one element {\I}: + +%V8 A prendre +\begin{alltt} +Print True. +\it{}Inductive True : Prop := I : True +\end{alltt} + +\subsection{Relations as inductive types.} +Some relations can also be introduced in a smart way as an inductive family +of propositions. Let us take as example the order $n \leq m$ on natural +numbers, called \citecoq{le} in {\coq}. 
+ This relation is introduced through +the following definition, quoted from the standard library\footnote{In the interpretation scope +for Peano arithmetic: +\citecoq{nat\_scope}, ``~\citecoq{n <= m}~'' is equivalent to +``~\citecoq{le n m}~'' .}: + + + + +%V8 A prendre +\begin{alltt} +Print le. \it +Inductive le (n:nat) : nat\arrow{}Prop := +| le_n: n {\coqle} n +| le_S: {\prodsym} m, n {\coqle} m \arrow{} n {\coqle} S m. +\end{alltt} + +Notice that in this definition $n$ is a general parameter, +while the second argument of \citecoq{le} is an index (see section +~\ref{vectors}). + This definition +introduces the binary relation $n {\leq} m$ as the family of unary predicates +``\textsl{to be greater or equal than a given $n$}'', parameterized by $n$. + +The introduction rules of this type can be seen as a sort of Prolog +rules for proving that a given integer $n$ is less or equal than another one. +In fact, an object of type $n{\leq} m$ is nothing but a proof +built up using the constructors \textsl{le\_n} and +\textsl{le\_S} of this type. As an example, let us construct +a proof that zero is less or equal than three using {\coq}'s interactive +proof mode. +Such an object can be obtained applying three times the second +introduction rule of \citecoq{le}, to a proof that zero is less or equal +than itself, +which is provided by the first constructor of \citecoq{le}: + +%V8 A prendre +\begin{alltt} +Theorem zero_leq_three: 0 {\coqle} 3. +Proof. +\it{} 1 subgoal + +============================ + 0 {\coqle} 3 + +\tt{}Proof. + constructor 2. + +\it{} 1 subgoal +============================ + 0 {\coqle} 2 + +\tt{} constructor 2. +\it{} 1 subgoal +============================ + 0 {\coqle} 1 + +\tt{} constructor 2 +\it{} 1 subgoal +============================ + 0 {\coqle} 0 + +\tt{} constructor 1. + +\it{}Proof completed +\tt{}Qed. 
+\end{alltt}
+
+\noindent When
+the current goal is an inductive type, the tactic
+``~\citecoq{constructor $i$}~'' \refmancite{Section \ref{constructor}} applies the $i$-th constructor in the
+definition of the type. We can take a look at the proof constructed
+using the command \texttt{Print}:
+
+%V8 A prendre
+\begin{alltt}
+Print zero_leq_three.
+\it{}zero_leq_three =
+le_S 0 2 (le_S 0 1 (le_S 0 0 (le_n 0)))
+     : 0 {\coqle} 3
+\end{alltt}
+
+When the parameter $i$ is not supplied, the tactic \texttt{constructor}
+tries to apply ``~\texttt{constructor $1$}~'', ``~\texttt{constructor $2$}~'',\dots,
+``~\texttt{constructor $n$}~'' where $n$ is the number of constructors
+of the inductive type (2 in our example) of the conclusion of the goal.
+Our little proof can thus be obtained by iterating the tactic
+\texttt{constructor} until it fails:
+
+%V8 A prendre
+\begin{alltt}
+Lemma zero_leq_three': 0 {\coqle} 3.
+ repeat constructor.
+Qed.
+\end{alltt}
+
+Notice that the strict order on \texttt{nat}, called \citecoq{lt},
+is not inductively defined:
+
+\begin{alltt}
+Print lt.
+\it
+lt = fun n m : nat {\funarrow} S n {\coqle} m
+     : nat {\arrow} nat {\arrow} Prop
+\tt
+Lemma zero_lt_three : 0 < 3.
+Proof.
+  unfold lt.
+\it
+====================
+   1 {\coqle} 3
+\tt
+  repeat constructor.
+Qed.
+\end{alltt}
+
+
+
+\subsection{The propositional equality type.} \label{equality}
+In {\coq}, the propositional equality between two inhabitants $a$ and
+$b$ of
+the same type $A$,
+noted $a=b$, is introduced as a family of recursive predicates
+``~\textsl{to be equal to $a$}~'', parameterized by both $a$ and its type
+$A$. This family of types has only one introduction rule, which
+corresponds to reflexivity.
+Notice that the syntax ``\citecoq{$a$ = $b$}~'' is an abbreviation
+for ``\citecoq{eq $a$ $b$}~'', and that the parameter $A$ is \emph{implicit},
+as it can be inferred from $a$.
+%V8 A prendre
+\begin{alltt}
+Print eq.
+\it{} Inductive eq (A : Type) (x : A) : A \arrow{} Prop := + refl_equal : x = x +For eq: Argument A is implicit +For refl_equal: Argument A is implicit +For eq: Argument scopes are [type_scope _ _] +For refl_equal: Argument scopes are [type_scope _] +\end{alltt} + +Notice also that the first parameter $A$ of \texttt{eq} has type +\texttt{Type}. The type system of {\coq} allows us to consider equality between +various kinds of terms: elements of a set, proofs, propositions, +types, and so on. +Look at \cite{coqrefman, coqart} to get more details on {\coq}'s type +system, as well as implicit arguments and argument scopes. + + +\begin{alltt} +Lemma eq_3_3 : 2 + 1 = 3. +Proof. + reflexivity. +Qed. + +Lemma eq_proof_proof : refl_equal (2*6) = refl_equal (3*4). +Proof. + reflexivity. +Qed. + +Print eq_proof_proof. +\it eq_proof_proof = +refl_equal (refl_equal (3 * 4)) + : refl_equal (2 * 6) = refl_equal (3 * 4) +\tt + +Lemma eq_lt_le : ( 2 < 4) = (3 {\coqle} 4). +Proof. + reflexivity. +Qed. + +Lemma eq_nat_nat : nat = nat. +Proof. + reflexivity. +Qed. + +Lemma eq_Set_Set : Set = Set. +Proof. + reflexivity. +Qed. +\end{alltt} + +\subsection{Logical connectives.} \label{LogicalConnectives} +The conjunction and disjunction of two propositions are also examples +of recursive types: + +\begin{alltt} +Inductive or (A B : Prop) : Prop := + or_introl : A \arrow{} A {\coqor} B | or_intror : B \arrow{} A {\coqor} B + +Inductive and (A B : Prop) : Prop := + conj : A \arrow{} B \arrow{} A {\coqand} B + +\end{alltt} + +The propositions $A$ and $B$ are general parameters of these +connectives. Choosing different universes for +$A$ and $B$ and for the inductive type itself gives rise to different +type constructors. For example, the type \textsl{sumbool} is a +disjunction but with computational contents. 
+
+\begin{alltt}
+Inductive sumbool (A B : Prop) : Set :=
+  left : A \arrow{} \{A\} + \{B\} | right : B \arrow{} \{A\} + \{B\}
+\end{alltt}
+
+
+
+This type --noted \texttt{\{$A$\}+\{$B$\}} in {\coq}-- can be used in {\coq}
+programs as a sort of boolean type, to check whether it is $A$ or $B$
+that is true. The values ``~\citecoq{left $p$}~'' and
+``~\citecoq{right $q$}~'' replace the boolean values \textsl{true} and
+\textsl{false}, respectively. The advantage of this type over
+\textsl{bool} is that it makes available the proofs $p$ of $A$ or $q$
+of $B$, which could be necessary to construct a verification proof
+about the program.
+For instance, let us consider the certified program \citecoq{le\_lt\_dec}
+of the Standard Library.
+
+\begin{alltt}
+Require Import Compare_dec.
+Check le_lt_dec.
+\it
+le_lt_dec
+     : {\prodsym} n m : nat, \{n {\coqle} m\} + \{m < n\}
+
+\end{alltt}
+
+We use \citecoq{le\_lt\_dec} to build a function for computing
+the max of two natural numbers:
+
+\begin{alltt}
+Definition max (n p :nat) := match le_lt_dec n p with
+                             | left _ {\funarrow} p
+                             | right _ {\funarrow} n
+                             end.
+\end{alltt}
+
+In the following proof, the case analysis on the term
+``~\citecoq{le\_lt\_dec n p}~'' gives us access to proofs
+of $n\leq p$ in the first case, and of $p<n$ in the second one.
+% Cases p of
+%    lt_intro1 {\funarrow} (lt_intro1 (S n))
+%  | (lt_intro2 m1 p2) {\funarrow} (lt_intro2 (S n) (S m1) (lt_n_S n m1 p2))
+%  end.
+%\end{alltt}
+
+%The guardedness condition must be satisfied only by the last argument
+%of the enclosed list. For example, the following declaration is an
+%alternative way of defining addition:
+
+%\begin{alltt}
+%Reset add.
+%Fixpoint add [n:nat] : nat\arrow{}nat :=
+%  Cases n of
+%    O {\funarrow} [x:nat]x
+%  | (S m) {\funarrow} [x:nat](add m (S x))
+%  end.
+%\end{alltt}
+
+In the following definition of addition,
+the second argument of \verb@plus''@ grows at each
+recursive call. However, as the first one always decreases, the
+definition is sound.
+\begin{alltt} +Fixpoint plus'' (n p:nat) \{struct n\} : nat := + match n with + | 0 {\funarrow} p + | S m {\funarrow} plus'' m (S p) + end. +\end{alltt} + + Moreover, the argument in the recursive call +could be a deeper component of $n$. This is the case in the following +definition of a boolean function determining whether a number is even +or odd: + +\begin{alltt} +Fixpoint even_test (n:nat) : bool := + match n + with 0 {\funarrow} true + | 1 {\funarrow} false + | S (S p) {\funarrow} even_test p + end. +\end{alltt} + +Mutually dependent definitions by structural induction are also +allowed. For example, the previous function \textsl{even} could alternatively +be defined using an auxiliary function \textsl{odd}: + +\begin{alltt} +Reset even_test. + + + +Fixpoint even_test (n:nat) : bool := + match n + with + | 0 {\funarrow} true + | S p {\funarrow} odd_test p + end +with odd_test (n:nat) : bool := + match n + with + | 0 {\funarrow} false + | S p {\funarrow} even_test p + end. +\end{alltt} + +%\begin{exercise} +%Define a function by structural induction that computes the number of +%nodes of a tree structure defined in page \pageref{Forest}. +%\end{exercise} + +Definitions by structural induction are computed + only when they are applied, and the decreasing argument +is a term having a constructor at the head. We can check this using +the \texttt{Eval} command, which computes the normal form of a well +typed term. + +\begin{alltt} +Eval simpl in even_test. +\it + = even_test + : nat {\arrow} bool +\tt +Eval simpl in (fun x : nat {\funarrow} even x). +\it + = fun x : nat {\funarrow} even x + : nat {\arrow} Prop +\tt +Eval simpl in (fun x : nat => plus 5 x). +\it + = fun x : nat {\funarrow} S (S (S (S (S x)))) + +\tt +Eval simpl in (fun x : nat {\funarrow} even_test (plus 5 x)). +\it + = fun x : nat {\funarrow} odd_test x + : nat {\arrow} bool +\tt +Eval simpl in (fun x : nat {\funarrow} even_test (plus x 5)). 
+\it + = fun x : nat {\funarrow} even_test (x + 5) + : nat {\arrow} bool +\end{alltt} + + +%\begin{exercise} +%Prove that the second definition of even satisfies the following +%theorem: +%\begin{verbatim} +%Theorem unfold_even : +% (x:nat) +% (even x)= (Cases x of +% O {\funarrow} true +% | (S O) {\funarrow} false +% | (S (S m)) {\funarrow} (even m) +% end). +%\end{verbatim} +%\end{exercise} + +\subsection{Proofs by Structural Induction} + +The principle of structural induction can be also used in order to +define proofs, that is, to prove theorems. Let us call an +\textsl{elimination combinator} any function that, given a predicate +$P$, defines a proof of ``~$P\;x$~'' by structural induction on $x$. In +{\coq}, the principle of proof by induction on natural numbers is a +particular case of an elimination combinator. The definition of this +combinator depends on three general parameters: the predicate to be +proven, the base case, and the inductive step: + +\begin{alltt} +Section Principle_of_Induction. +Variable P : nat {\arrow} Prop. +Hypothesis base_case : P 0. +Hypothesis inductive_step : {\prodsym} n:nat, P n {\arrow} P (S n). +Fixpoint nat_ind (n:nat) : (P n) := + match n return P n with + | 0 {\funarrow} base_case + | S m {\funarrow} inductive_step m (nat_ind m) + end. + +End Principle_of_Induction. +\end{alltt} + +As this proof principle is used very often, {\coq} automatically generates it +when an inductive type is introduced. Similar principles +\texttt{nat\_rec} and \texttt{nat\_rect} for defining objects in the +universes $\Set$ and $\Type$ are also automatically generated +\footnote{In fact, whenever possible, {\coq} generates the +principle \texttt{$I$\_rect}, then derives from it the +weaker principles \texttt{$I$\_ind} and \texttt{$I$\_rec}. +If some principle has to be defined by hand, the user may try +to build \texttt{$I$\_rect} (if possible). 
Thanks to {\coq}'s conversion +rule, this principle can be used directly to build proofs and/or +programs.}. The +command \texttt{Scheme} \refmancite{Section \ref{Scheme}} can be +used to generate an elimination combinator from certain parameters, +like the universe that the defined objects must inhabit, whether the +case analysis in the definitions must be dependent or not, etc. For +example, it can be used to generate an elimination combinator for +reasoning on even natural numbers from the mutually dependent +predicates introduced in page \pageref{Even}. We do not display the +combinators here by lack of space, but you can see them using the +\texttt{Print} command. + +\begin{alltt} +Scheme Even_induction := Minimality for even Sort Prop +with Odd_induction := Minimality for odd Sort Prop. +\end{alltt} + +\begin{alltt} +Theorem even_plus_four : {\prodsym} n:nat, even n {\arrow} even (4+n). +Proof. + intros n H. + elim H using Even_induction with (P0 := fun n {\funarrow} odd (4+n)); + simpl;repeat constructor;assumption. +Qed. +\end{alltt} + +Another example of an elimination combinator is the principle +of double induction on natural numbers, introduced by the following +definition: + +\begin{alltt} +Section Principle_of_Double_Induction. +Variable P : nat {\arrow} nat {\arrow}Prop. +Hypothesis base_case1 : {\prodsym} m:nat, P 0 m. +Hypothesis base_case2 : {\prodsym} n:nat, P (S n) 0. +Hypothesis inductive_step : {\prodsym} n m:nat, P n m {\arrow} + \,\, P (S n) (S m). + +Fixpoint nat_double_ind (n m:nat)\{struct n\} : P n m := + match n, m return P n m with + | 0 , x {\funarrow} base_case1 x + | (S x), 0 {\funarrow} base_case2 x + | (S x), (S y) {\funarrow} inductive_step x y (nat_double_ind x y) + end. +End Principle_of_Double_Induction. +\end{alltt} + +Changing the type of $P$ into $\nat\rightarrow\nat\rightarrow\Set$, +another combinator \texttt{nat\_double\_rec} for constructing +(certified) programs can be defined in exactly the same way. 
+This definition is left as an exercise.\label{natdoublerec} + +\iffalse +\begin{alltt} +Section Principle_of_Double_Recursion. +Variable P : nat {\arrow} nat {\arrow} Set. +Hypothesis base_case1 : {\prodsym} x:nat, P 0 x. +Hypothesis base_case2 : {\prodsym} x:nat, P (S x) 0. +Hypothesis inductive_step : {\prodsym} n m:nat, P n m {\arrow} P (S n) (S m). +Fixpoint nat_double_rec (n m:nat)\{struct n\} : P n m := + match n, m return P n m with + 0 , x {\funarrow} base_case1 x + | (S x), 0 {\funarrow} base_case2 x + | (S x), (S y) {\funarrow} inductive_step x y (nat_double_rec x y) + end. +End Principle_of_Double_Recursion. +\end{alltt} +\fi +For instance the function computing the minimum of two natural +numbers can be defined in the following way: + +\begin{alltt} +Definition min : nat {\arrow} nat {\arrow} nat := + nat_double_rec (fun (x y:nat) {\funarrow} nat) + (fun (x:nat) {\funarrow} 0) + (fun (y:nat) {\funarrow} 0) + (fun (x y r:nat) {\funarrow} S r). +Eval compute in (min 5 8). +\it += 5 : nat +\end{alltt} + + +%\begin{exercise} +% +%Define the combinator \texttt{nat\_double\_rec}, and apply it +%to give another definition of \citecoq{le\_lt\_dec} (using the theorems +%of the \texttt{Arith} library). +%\end{exercise} + +\subsection{Using Elimination Combinators.} +The tactic \texttt{apply} can be used to apply one of these proof +principles during the development of a proof. + +\begin{alltt} +Lemma not_circular : {\prodsym} n:nat, n {\coqdiff} S n. +Proof. + intro n. + apply nat_ind with (P:= fun n {\funarrow} n {\coqdiff} S n). +\it + + + +2 subgoals + + n : nat + ============================ + 0 {\coqdiff} 1 + + +subgoal 2 is: + {\prodsym} n0 : nat, n0 {\coqdiff} S n0 {\arrow} S n0 {\coqdiff} S (S n0) + +\tt + discriminate. + red; intros n0 Hn0 eqn0Sn0;injection eqn0Sn0;trivial. +Qed. +\end{alltt} + +The tactic \texttt{elim} \refmancite{Section \ref{Elim}} is a +refinement of \texttt{apply}, specially designed for the application +of elimination combinators. 
If $t$ is an object of an inductive type +$I$, then ``~\citecoq{elim $t$}~'' tries to find an abstraction $P$ of the +current goal $G$ such that $(P\;t)\equiv G$. Then it solves the goal +applying ``~$I\texttt{\_ind}\;P$~'', where $I$\texttt{\_ind} is the +combinator associated to $I$. The different cases of the induction +then appear as subgoals that remain to be solved. +In the previous proof, the tactic call ``~\citecoq{apply nat\_ind with (P:= fun n {\funarrow} n {\coqdiff} S n)}~'' can simply be replaced with ``~\citecoq{elim n}~''. + +The option ``~\citecoq{\texttt{elim} $t$ \texttt{using} $C$}~'' + allows to use a +derived combinator $C$ instead of the default one. Consider the +following theorem, stating that equality is decidable on natural +numbers: + +\label{iseqpage} +\begin{alltt} +Lemma eq_nat_dec : {\prodsym} n p:nat, \{n=p\}+\{n {\coqdiff} p\}. +Proof. + intros n p. +\end{alltt} + +Let us prove this theorem using the combinator \texttt{nat\_double\_rec} +of section~\ref{natdoublerec}. The example also illustrates how +\texttt{elim} may sometimes fail in finding a suitable abstraction $P$ +of the goal. Note that if ``~\texttt{elim n}~'' + is used directly on the +goal, the result is not the expected one. + +\vspace{12pt} + +%\pagebreak +\begin{alltt} + elim n using nat_double_rec. +\it +4 subgoals + + n : nat + p : nat + ============================ + {\prodsym} x : nat, \{x = p\} + \{x {\coqdiff} p\} + +subgoal 2 is: + nat {\arrow} \{0 = p\} + \{0 {\coqdiff} p\} + +subgoal 3 is: + nat {\arrow} {\prodsym} m : nat, \{m = p\} + \{m {\coqdiff} p\} {\arrow} \{S m = p\} + \{S m {\coqdiff} p\} + +subgoal 4 is: + nat +\end{alltt} + +The four sub-goals obtained do not correspond to the premises that +would be expected for the principle \texttt{nat\_double\_rec}. 
The +problem comes from the fact that +this principle for eliminating $n$ +has a universally quantified formula as conclusion, which confuses +\texttt{elim} about the right way of abstracting the goal. + +%In effect, let us consider the type of the goal before the call to +%\citecoq{elim}: ``~\citecoq{\{n = p\} + \{n {\coqdiff} p\}}~''. + +%Among all the abstractions that can be built by ``~\citecoq{elim n}~'' +%let us consider this one +%$P=$\citecoq{fun n :nat {\funarrow} fun q : nat {\funarrow} {\{q= p\} + \{q {\coqdiff} p\}}}. +%It is easy to verify that +%$P$ has type \citecoq{nat {\arrow} nat {\arrow} Set}, and that, if some +%$q:\citecoq{nat}$ is given, then $P\;q\;$ matches the current goal. +%Then applying \citecoq{nat\_double\_rec} with $P$ generates +%four goals, corresponding to + + + + +Therefore, +in this case the abstraction must be explicited using the tactic +\texttt{pattern}. Once the right abstraction is provided, the rest of +the proof is immediate: + +\begin{alltt} +Undo. + pattern p,n. +\it + n : nat + p : nat + ============================ + (fun n0 n1 : nat {\funarrow} \{n1 = n0\} + \{n1 {\coqdiff} n0\}) p n +\tt + elim n using nat_double_rec. +\it +3 subgoals + + n : nat + p : nat + ============================ + {\prodsym} x : nat, \{x = 0\} + \{x {\coqdiff} 0\} + +subgoal 2 is: + {\prodsym} x : nat, \{0 = S x\} + \{0 {\coqdiff} S x\} +subgoal 3 is: + {\prodsym} n0 m : nat, \{m = n0\} + \{m {\coqdiff} n0\} {\arrow} \{S m = S n0\} + \{S m {\coqdiff} S n0\} + +\tt + destruct x; auto. + destruct x; auto. + intros n0 m H; case H. + intro eq; rewrite eq ; auto. + intro neg; right; red ; injection 1; auto. +Defined. +\end{alltt} + + +Notice that the tactic ``~\texttt{decide equality}~'' +\refmancite{Section\ref{DecideEquality}} generalises the proof +above to a large class of inductive types. 
It can be used for proving
+a proposition of the form
+$\forall\,(x,y:R),\{x=y\}+\{x{\coqdiff}y\}$, where $R$ is an inductive datatype
+all whose constructors take informative arguments ---like for example
+the type {\nat}:
+
+\begin{alltt}
+Definition eq_nat_dec' : {\prodsym} n p:nat, \{n=p\} + \{n{\coqdiff}p\}.
+  decide equality.
+Defined.
+\end{alltt}
+
+\begin{exercise}
+\begin{enumerate}
+\item Define a recursive function \emph{nat2itree}
+mapping any natural number $n$ into an infinitely branching
+tree of height $n$.
+\item Provide an elimination combinator for these trees.
+\item Prove that the relation \citecoq{itree\_le} is a preorder
+(i.e. reflexive and transitive).
+\end{enumerate}
+\end{exercise}
+
+\begin{exercise} \label{zeroton}
+Define the type of lists, and a predicate ``being an ordered list''
+using an inductive family. Then, define the function
+$(from\;n)=0::1\;\ldots\; n::\texttt{nil}$ and prove that it always generates an
+ordered list.
+\end{exercise}
+
+
+\subsection{Well-founded Recursion}
+\label{WellFoundedRecursion}
+
+Structural induction is a strong elimination rule for inductive types.
+This method can be used to define any function whose termination is
+based on the well-foundedness of a certain order relation $R$ decreasing
+at each recursive call. What makes this principle so strong is the
+possibility of reasoning by structural induction on the proof that
+a certain $R$ is well-founded. In order to illustrate this we have
+first to introduce the predicate of accessibility.
+
+\begin{alltt}
+Print Acc.
+\it
+Inductive Acc (A : Set) (R : A {\arrow} A {\arrow} Prop) (x:A) : Prop :=
+    Acc_intro : ({\prodsym} y : A, R y x {\arrow} Acc R y) {\arrow} Acc R x
+For Acc: Argument A is implicit
+For Acc_intro: Arguments A, R are implicit
+
+\dots
+\end{alltt}
+
+\noindent This inductive predicate characterizes those elements $x$ of
+$A$ such that any descending $R$-chain $\ldots x_2\;R\;x_1\;R\;x$
+starting from $x$ is finite.
A well-founded relation is a relation
+such that all the elements of $A$ are accessible.
+
+Consider now the problem of representing in {\coq} the following ML
+function $\textsl{div}(x,y)$ on natural numbers, which computes
+$\lceil\frac{x}{y}\rceil$ if $y>0$ and yields $x$ otherwise.
+
+\begin{verbatim}
+let rec div x y =
+  if x = 0 then 0
+  else if y = 0 then x
+  else (div (x-y) y)+1;;
+\end{verbatim}
+
+
+The equality test on natural numbers can be represented as the
+function \textsl{eq\_nat\_dec} defined page \pageref{iseqpage}. Given $x$ and
+$y$, this function yields either the value $(\textsl{left}\;p)$ if
+there exists a proof $p:x=y$, or the value $(\textsl{right}\;q)$ if
+there exists $q:x\not = y$. The subtraction function is already
+defined in the library \citecoq{Minus}.
+
+Hence, a direct translation of the ML function \textsl{div} would be:
+
+\begin{alltt}
+Require Import Minus.
+
+Fixpoint div (x y:nat)\{struct x\}: nat :=
+ if eq_nat_dec x 0
+  then 0
+  else if eq_nat_dec y 0
+       then x
+       else S (div (x-y) y).
+
+\it Error:
+Recursive definition of div is ill-formed.
+In environment
+div : nat {\arrow} nat {\arrow} nat
+x : nat
+y : nat
+_ : x {\coqdiff} 0
+_ : y {\coqdiff} 0
+
+Recursive call to div has principal argument equal to
+"x - y"
+instead of a subterm of x
+\end{alltt}
+
+
+The program \texttt{div} is rejected by {\coq} because it does not verify
+the syntactical condition to ensure termination. In particular, the
+argument of the recursive call is not a pattern variable issued from a
+case analysis on $x$.
+We would have the same problem if we had the directive
+``~\citecoq{\{struct y\}}~'' instead of ``~\citecoq{\{struct x\}}~''.
+However, we know that this program always
+stops. One way to justify its termination is to define it by
+structural induction on a proof that $x$ is accessible through the
+relation $<$. Notice that any natural number $x$ is accessible
+for this relation.
In order to do this, it is first necessary to prove +some auxiliary lemmas, justifying that the first argument of +\texttt{div} decreases at each recursive call. + +\begin{alltt} +Lemma minus_smaller_S : {\prodsym} x y:nat, x - y < S x. +Proof. + intros x y; pattern y, x; + elim x using nat_double_ind. + destruct x0; auto with arith. + simpl; auto with arith. + simpl; auto with arith. +Qed. + + +Lemma minus_smaller_positive : + {\prodsym} x y:nat, x {\coqdiff}0 {\arrow} y {\coqdiff} 0 {\arrow} x - y < x. +Proof. + destruct x; destruct y; + ( simpl;intros; apply minus_smaller || + intros; absurd (0=0); auto). +Qed. +\end{alltt} + +\noindent The last two lemmas are necessary to prove that for any pair +of positive natural numbers $x$ and $y$, if $x$ is accessible with +respect to \citecoq{lt}, then so is $x-y$. + +\begin{alltt} +Definition minus_decrease : {\prodsym} x y:nat, Acc lt x {\arrow} + x {\coqdiff} 0 {\arrow} + y {\coqdiff} 0 {\arrow} + Acc lt (x-y). +Proof. + intros x y H; case H. + intros Hz posz posy. + apply Hz; apply minus_smaller_positive; assumption. +Defined. +\end{alltt} + +Let us take a look at the proof of the lemma \textsl{minus\_decrease}, since +the way in which it has been proven is crucial for what follows. +\begin{alltt} +Print minus_decrease. +\it +minus_decrease = +fun (x y : nat) (H : Acc lt x) {\funarrow} +match H in (Acc _ y0) return (y0 {\coqdiff} 0 {\arrow} y {\coqdiff} 0 {\arrow} Acc lt (y0 - y)) with +| Acc_intro z Hz {\funarrow} + fun (posz : z {\coqdiff} 0) (posy : y {\coqdiff} 0) {\funarrow} + Hz (z - y) (minus_smaller_positive z y posz posy) +end + : {\prodsym} x y : nat, Acc lt x {\arrow} x {\coqdiff} 0 {\arrow} y {\coqdiff} 0 {\arrow} Acc lt (x - y) + +\end{alltt} +\noindent Notice that the function call +$(\texttt{minus\_decrease}\;n\;m\;H)$ +indeed yields an accessibility proof that is \textsl{structurally +smaller} than its argument $H$, because it is (an application of) its +recursive component $Hz$. 
This enables to justify the following +definition of \textsl{div\_aux}: + +\begin{alltt} +Definition div_aux (x y:nat)(H: Acc lt x):nat. + fix 3. + intros. + refine (if eq_nat_dec x 0 + then 0 + else if eq_nat_dec y 0 + then y + else div_aux (x-y) y _). +\it + div_aux : {\prodsym} x : nat, nat {\arrow} Acc lt x {\arrow} nat + x : nat + y : nat + H : Acc lt x + _ : x {\coqdiff} 0 + _0 : y {\coqdiff} 0 + ============================ + Acc lt (x - y) + +\tt + apply (minus_decrease x y H);auto. +Defined. +\end{alltt} + +The main division function is easily defined, using the theorem +\citecoq{lt\_wf} of the library \citecoq{Wf\_nat}. This theorem asserts that +\citecoq{nat} is well founded w.r.t. \citecoq{lt}, thus any natural number +is accessible. +\begin{alltt} +Definition div x y := div_aux x y (lt_wf x). +\end{alltt} + +Let us explain the proof above. In the definition of \citecoq{div\_aux}, +what decreases is not $x$ but the \textsl{proof} of the accessibility +of $x$. The tactic ``~\texttt{fix 3}~'' is used to indicate that the proof +proceeds by structural induction on the third argument of the theorem +--that is, on the accessibility proof. It also introduces a new +hypothesis in the context, named as the current theorem, and with the +same type as the goal. Then, the proof is refined with an incomplete +proof term, containing a hole \texttt{\_}. This hole corresponds to the proof +of accessibility for $x-y$, and is filled up with the (smaller!) +accessibility proof provided by the function \texttt{minus\_decrease}. + + +\noindent Let us take a look to the term \textsl{div\_aux} defined: + +\pagebreak +\begin{alltt} +Print div_aux. 
+\it +div_aux = +(fix div_aux (x y : nat) (H : Acc lt x) \{struct H\} : nat := + match eq_nat_dec x 0 with + | left _ {\funarrow} 0 + | right _ {\funarrow} + match eq_nat_dec y 0 with + | left _ {\funarrow} y + | right _0 {\funarrow} div_aux (x - y) y (minus_decrease x y H _ _0) + end + end) + : {\prodsym} x : nat, nat {\arrow} Acc lt x {\arrow} nat + +\end{alltt} + +If the non-informative parts from this proof --that is, the +accessibility proof-- are erased, then we obtain exactly the program +that we were looking for. +\begin{alltt} + +Extraction div. + +\it +let div x y = + div_aux x y +\tt + +Extraction div_aux. + +\it +let rec div_aux x y = + match eq_nat_dec x O with + | Left {\arrow} O + | Right {\arrow} + (match eq_nat_dec y O with + | Left {\arrow} y + | Right {\arrow} div_aux (minus x y) y) +\end{alltt} + +This methodology enables the representation +of any program whose termination can be proved in {\coq}. Once the +expected properties from this program have been verified, the +justification of its termination can be thrown away, keeping just the +desired computational behavior for it. + +\section{A case study in dependent elimination}\label{CaseStudy} + +Dependent types are very expressive, but ignoring some useful +techniques can cause some problems to the beginner. +Let us consider again the type of vectors (see section~\ref{vectors}). +We want to prove a quite trivial property: the only value of type +``~\citecoq{vector A 0}~'' is ``~\citecoq{Vnil $A$}~''. + +Our first naive attempt leads to a \emph{cul-de-sac}. +\begin{alltt} +Lemma vector0_is_vnil : + {\prodsym} (A:Set)(v:vector A 0), v = Vnil A. +Proof. + intros A v;inversion v. +\it +1 subgoal + + A : Set + v : vector A 0 + ============================ + v = Vnil A +\tt +Abort. +\end{alltt} + +Another attempt is to do a case analysis on a vector of any length +$n$, under an explicit hypothesis $n=0$. The tactic +\texttt{discriminate} will help us to get rid of the case +$n=\texttt{S $p$}$. 
+Unfortunately, even the statement of our lemma is refused! + +\begin{alltt} + Lemma vector0_is_vnil_aux : + {\prodsym} (A:Set)(n:nat)(v:vector A n), n = 0 {\arrow} v = Vnil A. + +\it +Error: In environment +A : Set +n : nat +v : vector A n +e : n = 0 +The term "Vnil A" has type "vector A 0" while it is expected to have type + "vector A n" +\end{alltt} + +In effect, the equality ``~\citecoq{v = Vnil A}~'' is ill typed, +because the type ``~\citecoq{vector A n}~'' is not \emph{convertible} +with ``~\citecoq{vector A 0}~''. + +This problem can be solved if we consider the heterogeneous +equality \citecoq{JMeq} \cite{conor:motive} +which allows us to consider terms of different types, even if this +equality can only be proven for terms in the same type. +The axiom \citecoq{JMeq\_eq}, from the library \citecoq{JMeq} allows us to convert a +heterogeneous equality to a standard one. + +\begin{alltt} +Lemma vector0_is_vnil_aux : + {\prodsym} (A:Set)(n:nat)(v:vector A n), + n= 0 {\arrow} JMeq v (Vnil A). +Proof. + destruct v. + auto. + intro; discriminate. +Qed. +\end{alltt} + +Our property of vectors of null length can be easily proven: + +\begin{alltt} +Lemma vector0_is_vnil : {\prodsym} (A:Set)(v:vector A 0), v = Vnil A. + intros a v;apply JMeq_eq. + apply vector0_is_vnil_aux. + trivial. +Qed. +\end{alltt} + +It is interesting to look at another proof of +\citecoq{vector0\_is\_vnil}, which illustrates a technique developed +and used by various people (consult in the \emph{Coq-club} mailing +list archive the contributions by Yves Bertot, Pierre Letouzey, Laurent Théry, +Jean Duprat, and Nicolas Magaud, Venanzio Capretta and Conor McBride). +This technique is also used for unfolding infinite list definitions +(see chapter13 of~\cite{coqart}). +Notice that this definition does not rely on any axiom (\emph{e.g.} \texttt{JMeq\_eq}). + +We first give a new definition of the identity on vectors. 
Before that,
+we make the use of constructors and selectors lighter thanks to
+the implicit arguments feature:
+
+\begin{alltt}
+Implicit Arguments Vcons [A n].
+Implicit Arguments Vnil [A].
+Implicit Arguments Vhead [A n].
+Implicit Arguments Vtail [A n].
+
+Definition Vid : {\prodsym} (A : Set)(n:nat), vector A n {\arrow} vector A n.
+Proof.
+ destruct n; intro v.
+ exact Vnil.
+ exact (Vcons (Vhead v) (Vtail v)).
+Defined.
+\end{alltt}
+
+
+Then we prove that \citecoq{Vid} is the identity on vectors:
+
+\begin{alltt}
+Lemma Vid_eq : {\prodsym} (n:nat) (A:Set)(v:vector A n), v=(Vid _ n v).
+Proof.
+ destruct v.
+
+\it
+  A : Set
+  ============================
+   Vnil = Vid A 0 Vnil
+
+subgoal 2 is:
+ Vcons a v = Vid A (S n) (Vcons a v)
+\tt
+ reflexivity.
+ reflexivity.
+Defined.
+\end{alltt}
+
+Why define a new identity function on vectors? The following
+dialogue shows that \citecoq{Vid} has some interesting computational
+properties:
+
+\begin{alltt}
+Eval simpl in (fun (A:Set)(v:vector A 0) {\funarrow} (Vid _ _ v)).
+\it = fun (A : Set) (_ : vector A 0) {\funarrow} Vnil
+     : {\prodsym} A : Set, vector A 0 {\arrow} vector A 0
+
+\end{alltt}
+
+Notice that the plain identity on vectors doesn't convert \citecoq{v}
+into \citecoq{Vnil}.
+\begin{alltt}
+Eval simpl in (fun (A:Set)(v:vector A 0) {\funarrow} v).
+\it = fun (A : Set) (v : vector A 0) {\funarrow} v
+     : {\prodsym} A : Set, vector A 0 {\arrow} vector A 0
+\end{alltt}
+
+Then we prove easily that any vector of length 0 is \citecoq{Vnil}:
+
+\begin{alltt}
+Theorem zero_nil : {\prodsym} A (v:vector A 0), v = Vnil.
+Proof.
+ intros.
+ change (Vnil (A:=A)) with (Vid _ 0 v).
+\it
+1 subgoal
+
+  A : Set
+  v : vector A 0
+  ============================
+   v = Vid A 0 v
+\tt
+ apply Vid_eq.
+Defined.
+\end{alltt}
+
+A similar result can be proven about vectors of strictly positive
+length\footnote{As for \citecoq{Vid} and \citecoq{Vid\_eq}, this definition
+is from Jean Duprat.}.
+
+\begin{alltt}
+
+
+Theorem decomp :
+ {\prodsym} (A : Set) (n : nat) (v : vector A (S n)),
+ v = Vcons (Vhead v) (Vtail v).
+Proof.
+ intros.
+ change (Vcons (Vhead v) (Vtail v)) with (Vid _ (S n) v).
+\it
+ 1 subgoal
+
+ A : Set
+ n : nat
+ v : vector A (S n)
+ ============================
+ v = Vid A (S n) v
+
+\tt{} apply Vid_eq.
+Defined.
+\end{alltt}
+
+
+Both lemmas, \citecoq{zero\_nil} and \citecoq{decomp},
+can be used to easily derive a double recursion principle
+on vectors of the same length:
+
+
+\begin{alltt}
+Definition vector_double_rect :
+ {\prodsym} (A:Set) (P: {\prodsym} (n:nat),(vector A n){\arrow}(vector A n) {\arrow} Type),
+ P 0 Vnil Vnil {\arrow}
+ ({\prodsym} n (v1 v2 : vector A n) a b, P n v1 v2 {\arrow}
+ P (S n) (Vcons a v1) (Vcons b v2)) {\arrow}
+ {\prodsym} n (v1 v2 : vector A n), P n v1 v2.
+ induction n.
+ intros; rewrite (zero_nil _ v1); rewrite (zero_nil _ v2).
+ auto.
+ intros v1 v2; rewrite (decomp _ _ v1);rewrite (decomp _ _ v2).
+ apply X0; auto.
+Defined.
+\end{alltt}
+
+Notice that, due to the conversion rule of {\coq}'s type system,
+this function can be used directly with \citecoq{Prop} or \citecoq{Set}
+instead of \citecoq{Type} (thus it is useless to build
+\citecoq{vector\_double\_ind} and \citecoq{vector\_double\_rec} from scratch).
+
+We finish this example by showing how to define the bitwise
+\emph{or} on boolean vectors of the same length,
+and proving a little property about this
+operation.
+
+\begin{alltt}
+Definition bitwise_or n v1 v2 : vector bool n :=
+ vector_double_rect
+ bool
+ (fun n v1 v2 {\funarrow} vector bool n)
+ Vnil
+ (fun n v1 v2 a b r {\funarrow} Vcons (orb a b) r) n v1 v2.
+\end{alltt}
+
+Let us define recursively the $n$-th element of a vector. Notice
+that it must be a partial function, in case $n$ is greater than or equal
+to the length of the vector. Since {\coq} only considers total
+functions, the function returns a value in an \emph{option} type.
+
+\begin{alltt}
+Fixpoint vector_nth (A:Set)(n:nat)(p:nat)(v:vector A p)
+ \{struct v\}
+ : option A :=
+ match n,v with
+ _ , Vnil {\funarrow} None
+ | 0 , Vcons b _ _ {\funarrow} Some b
+ | S n', Vcons _ p' v' {\funarrow} vector_nth A n' p' v'
+ end.
+Implicit Arguments vector_nth [A p].
+\end{alltt}
+
+We can now prove --- using the double induction combinator ---
+a simple property relating \citecoq{vector\_nth} and \citecoq{bitwise\_or}:
+
+\begin{alltt}
+Lemma nth_bitwise :
+ {\prodsym} (n:nat) (v1 v2: vector bool n) i a b,
+ vector_nth i v1 = Some a {\arrow}
+ vector_nth i v2 = Some b {\arrow}
+ vector_nth i (bitwise_or _ v1 v2) = Some (orb a b).
+Proof.
+ intros n v1 v2; pattern n,v1,v2.
+ apply vector_double_rect.
+ simpl.
+ destruct i; discriminate 1.
+ destruct i; simpl;auto.
+ injection 1; injection 2;intros; subst a; subst b; auto.
+Qed.
+\end{alltt}
+
+
+\section{Co-inductive Types and Non-ending Constructions}
+\label{CoInduction}
+
+The objects of an inductive type are well-founded with respect to
+the constructors of the type. In other words, these objects are built
+by applying \emph{a finite number of times} the constructors of the type.
+Co-inductive types are obtained by relaxing this condition,
+and may contain non-well-founded objects \cite{EG96,EG95a}. An
+example of a co-inductive type is the type of infinite
+sequences formed with elements of type $A$, also called streams. This
+type can be introduced through the following definition:
+
+\begin{alltt}
+ CoInductive Stream (A: Set) :Set :=
+ | Cons : A\arrow{}Stream A\arrow{}Stream A.
+\end{alltt}
+
+If we are interested in finite or infinite sequences, we consider the type
+of \emph{lazy lists}:
+
+\begin{alltt}
+CoInductive LList (A: Set) : Set :=
+ | LNil : LList A
+ | LCons : A {\arrow} LList A {\arrow} LList A.
+\end{alltt}
+
+
+It is also possible to define co-inductive types for the
+trees with infinite branches (see Chapter 13 of~\cite{coqart}).
+ +Structural induction is the way of expressing that inductive types +only contain well-founded objects. Hence, this elimination principle +is not valid for co-inductive types, and the only elimination rule for +streams is case analysis. This principle can be used, for example, to +define the destructors \textsl{head} and \textsl{tail}. + +\begin{alltt} + Definition head (A:Set)(s : Stream A) := + match s with Cons a s' {\funarrow} a end. + + Definition tail (A : Set)(s : Stream A) := + match s with Cons a s' {\funarrow} s' end. +\end{alltt} + +Infinite objects are defined by means of (non-ending) methods of +construction, like in lazy functional programming languages. Such +methods can be defined using the \texttt{CoFixpoint} command +\refmancite{Section \ref{CoFixpoint}}. For example, the following +definition introduces the infinite list $[a,a,a,\ldots]$: + +\begin{alltt} + CoFixpoint repeat (A:Set)(a:A) : Stream A := + Cons a (repeat a). +\end{alltt} + + +However, not every co-recursive definition is an admissible method of +construction. Similarly to the case of structural induction, the +definition must verify a \textsl{guardedness} condition to be +accepted. This condition states that any recursive call in the +definition must be protected --i.e, be an argument of-- some +constructor, and only an argument of constructors \cite{EG94a}. The +following definitions are examples of valid methods of construction: + +\begin{alltt} +CoFixpoint iterate (A: Set)(f: A {\arrow} A)(a : A) : Stream A:= + Cons a (iterate f (f a)). + +CoFixpoint map + (A B:Set)(f: A {\arrow} B)(s : Stream A) : Stream B:= + match s with Cons a tl {\funarrow} Cons (f a) (map f tl) end. +\end{alltt} + +\begin{exercise} +Define two different methods for constructing the stream which +infinitely alternates the values \citecoq{true} and \citecoq{false}. 
+\end{exercise}
+\begin{exercise}
+Using the destructors \texttt{head} and \texttt{tail}, define a function
+which takes the n-th element of an infinite stream.
+\end{exercise}
+
+A non-ending method of construction is computed lazily. This means
+that its definition is unfolded only when the object that it
+introduces is eliminated, that is, when it appears as the argument of
+a case expression. We can check this using the command
+\texttt{Eval}.
+
+\begin{alltt}
+Eval simpl in (fun (A:Set)(a:A) {\funarrow} repeat a).
+\it = fun (A : Set) (a : A) {\funarrow} repeat a
+ : {\prodsym} A : Set, A {\arrow} Stream A
+\tt
+Eval simpl in (fun (A:Set)(a:A) {\funarrow} head (repeat a)).
+\it = fun (A : Set) (a : A) {\funarrow} a
+ : {\prodsym} A : Set, A {\arrow} A
+\end{alltt}
+
+%\begin{exercise}
+%Prove the following theorem:
+%\begin{verbatim}
+%Theorem expand_repeat : (a:A)(repeat a)=(Cons a (repeat a)).
+%\end{verbatim}
+%Hint: Prove first the streams version of the lemma in exercise
+%\ref{expand}.
+%\end{exercise}
+
+\subsection{Extensional Properties}
+
+Case analysis is also a valid proof principle for infinite
+objects. However, this principle is not sufficient to prove
+\textsl{extensional} properties, that is, properties concerning the
+whole infinite object \cite{EG95a}. A typical example of an
+extensional property is the predicate expressing that two streams have
+the same elements. In many cases, the minimal reflexive relation $a=b$
+that is used as equality for inductive types is too small to capture
+equality between streams. Consider for example the streams
+$\texttt{iterate}\;f\;(f\;x)$ and
+$(\texttt{map}\;f\;(\texttt{iterate}\;f\;x))$. Even though these two streams have
+the same elements, no finite expansion of their definitions leads to
+equal terms. In other words, in order to deal with extensional
+properties, it is necessary to construct infinite proofs. The type of
+infinite proofs of equality can be introduced as a co-inductive
+predicate, as follows:
+\begin{alltt}
+CoInductive EqSt (A: Set) : Stream A {\arrow} Stream A {\arrow} Prop :=
+ eqst : {\prodsym} s1 s2: Stream A,
+ head s1 = head s2 {\arrow}
+ EqSt (tail s1) (tail s2) {\arrow}
+ EqSt s1 s2.
+\end{alltt}
+
+It is possible to introduce proof principles for reasoning about
+infinite objects as combinators defined through
+\texttt{CoFixpoint}. However, in contrast to the case of inductive
+types, proof principles associated with co-inductive types are not
+elimination but \textsl{introduction} combinators. An example of such
+a combinator is Park's principle for proving the equality of two
+streams, usually called the \textsl{principle of co-induction}. It
+states that two streams are equal if they satisfy a
+\textit{bisimulation}. A bisimulation is a binary relation $R$ such
+that any pair of streams $s_1$ and $s_2$ satisfying $R$ have equal
+heads, and tails that also satisfy $R$. This principle is in fact a
+method for constructing an infinite proof:
+
+\begin{alltt}
+Section Parks_Principle.
+Variable A : Set.
+Variable R : Stream A {\arrow} Stream A {\arrow} Prop.
+Hypothesis bisim1 : {\prodsym} s1 s2:Stream A,
+ R s1 s2 {\arrow} head s1 = head s2.
+
+Hypothesis bisim2 : {\prodsym} s1 s2:Stream A,
+ R s1 s2 {\arrow} R (tail s1) (tail s2).
+
+CoFixpoint park_ppl :
+ {\prodsym} s1 s2:Stream A, R s1 s2 {\arrow} EqSt s1 s2 :=
+ fun s1 s2 (p : R s1 s2) {\funarrow}
+ eqst s1 s2 (bisim1 s1 s2 p)
+ (park_ppl (tail s1)
+ (tail s2)
+ (bisim2 s1 s2 p)).
+End Parks_Principle.
+\end{alltt}
+
+Let us use the principle of co-induction to prove the extensional
+equality mentioned above.
+\begin{alltt}
+Theorem map_iterate : {\prodsym} (A:Set)(f:A{\arrow}A)(x:A),
+ EqSt (iterate f (f x))
+ (map f (iterate f x)).
+Proof.
+ intros A f x.
+ apply park_ppl with
+ (R:= fun s1 s2 {\funarrow}
+ {\exsym} x: A, s1 = iterate f (f x) {\coqand}
+ s2 = map f (iterate f x)).
+
+ intros s1 s2 (x0,(eqs1,eqs2));
+ rewrite eqs1; rewrite eqs2; reflexivity.
+ intros s1 s2 (x0,(eqs1,eqs2)).
+ exists (f x0);split;
+ [rewrite eqs1|rewrite eqs2]; reflexivity.
+ exists x;split; reflexivity.
+Qed.
+\end{alltt}
+
+The use of Park's principle is sometimes annoying, because it requires
+one to find an invariant relation and prove that it is indeed a
+bisimulation. In many cases, a shorter proof can be obtained by trying
+to construct an ad-hoc infinite proof, defined by a guarded
+declaration. The tactic ``~\texttt{Cofix $f$}~'' can be used to do
+that. Similarly to the tactic \texttt{fix} indicated in Section
+\ref{WellFoundedRecursion}, this tactic introduces an extra hypothesis
+$f$ into the context, whose type is the same as the current goal. Note
+that the applications of $f$ in the proof \textsl{must be guarded}. In
+order to prevent us from doing unguarded calls, we can define a tactic
+that always applies a constructor before using $f$ \refmancite{Chapter
+\ref{WritingTactics}}:
+
+\begin{alltt}
+Ltac infiniteproof f :=
+ cofix f;
+ constructor;
+ [clear f| simpl; try (apply f; clear f)].
+\end{alltt}
+
+
+In the example above, this tactic produces a much simpler proof
+than the former one:
+
+\begin{alltt}
+Theorem map_iterate' : {\prodsym} (A:Set)(f:A{\arrow}A)(x:A),
+ EqSt (iterate f (f x))
+ (map f (iterate f x)).
+Proof.
+ infiniteproof map_iterate'.
+ reflexivity.
+Qed.
+\end{alltt}
+
+\begin{exercise}
+Define a co-inductive type $Nat$ containing non-standard
+natural numbers ---that is, verifying
+
+$$\exists m \in \mbox{\texttt{Nat}}, \forall\, n \in \mbox{\texttt{Nat}}, n Infinite l
+ A : Set
+ a : A
+ l : LList A
+ H0 : ~ Finite (LCons a l)
+ ============================
+ Infinite l
+\end{alltt}
+At this point, one must not apply \citecoq{H}! .
It would be possible +to solve the current goal by an inversion of ``~\citecoq{Finite (LCons a l)}~'', but, since the guard condition would be violated, the user +would get an error message after typing \citecoq{Qed}. +In order to satisfy the guard condition, we apply the constructor of +\citecoq{Infinite}, \emph{then} apply \citecoq{H}. + +\begin{alltt} + constructor. + apply H. + red; intro H1;case H0. + constructor. + trivial. +Qed. +\end{alltt} + + + + +The reader is invited to replay this proof and understand each of its steps. + + +\bibliographystyle{abbrv} +\bibliography{manbiblio,morebib} + +\end{document} + diff --git a/doc/RecTutorial/RecTutorial.v b/doc/RecTutorial/RecTutorial.v new file mode 100644 index 00000000..d79b85df --- /dev/null +++ b/doc/RecTutorial/RecTutorial.v @@ -0,0 +1,1229 @@ +Inductive nat : Set := + | O : nat + | S : nat->nat. +Check nat. +Check O. +Check S. + +Reset nat. +Print nat. + + +Print le. + +Theorem zero_leq_three: 0 <= 3. + +Proof. + constructor 2. + constructor 2. + constructor 2. + constructor 1. + +Qed. + +Print zero_leq_three. + + +Lemma zero_leq_three': 0 <= 3. + repeat constructor. +Qed. + + +Lemma zero_lt_three : 0 < 3. +Proof. + unfold lt. + repeat constructor. +Qed. + + +Require Import List. + +Print list. + +Check list. + +Check (nil (A:=nat)). + +Check (nil (A:= nat -> nat)). + +Check (fun A: Set => (cons (A:=A))). + +Check (cons 3 (cons 2 nil)). + + + + +Require Import Bvector. + +Print vector. + +Check (Vnil nat). + +Check (fun (A:Set)(a:A)=> Vcons _ a _ (Vnil _)). + +Check (Vcons _ 5 _ (Vcons _ 3 _ (Vnil _))). + + + + + + + + + + + + + +Lemma eq_3_3 : 2 + 1 = 3. +Proof. + reflexivity. +Qed. +Print eq_3_3. + +Lemma eq_proof_proof : refl_equal (2*6) = refl_equal (3*4). +Proof. + reflexivity. +Qed. +Print eq_proof_proof. + +Lemma eq_lt_le : ( 2 < 4) = (3 <= 4). +Proof. + reflexivity. +Qed. + +Lemma eq_nat_nat : nat = nat. +Proof. + reflexivity. +Qed. + +Lemma eq_Set_Set : Set = Set. +Proof. + reflexivity. +Qed. 
+ +Lemma eq_Type_Type : Type = Type. +Proof. + reflexivity. +Qed. + + +Check (2 + 1 = 3). + + +Check (Type = Type). + +Goal Type = Type. +reflexivity. +Qed. + + +Print or. + +Print and. + + +Print sumbool. + +Print ex. + +Require Import ZArith. +Require Import Compare_dec. + +Check le_lt_dec. + +Definition max (n p :nat) := match le_lt_dec n p with + | left _ => p + | right _ => n + end. + +Theorem le_max : forall n p, n <= p -> max n p = p. +Proof. + intros n p ; unfold max ; case (le_lt_dec n p); simpl. + trivial. + intros; absurd (p < p); eauto with arith. +Qed. + +Extraction max. + + + + + + +Inductive tree(A:Set) : Set := + node : A -> forest A -> tree A +with + forest (A: Set) : Set := + nochild : forest A | + addchild : tree A -> forest A -> forest A. + + + + + +Inductive + even : nat->Prop := + evenO : even O | + evenS : forall n, odd n -> even (S n) +with + odd : nat->Prop := + oddS : forall n, even n -> odd (S n). + +Lemma odd_49 : odd (7 * 7). + simpl; repeat constructor. +Qed. + + + +Definition nat_case := + fun (Q : Type)(g0 : Q)(g1 : nat -> Q)(n:nat) => + match n return Q with + | 0 => g0 + | S p => g1 p + end. + +Eval simpl in (nat_case nat 0 (fun p => p) 34). + +Eval simpl in (fun g0 g1 => nat_case nat g0 g1 34). + +Eval simpl in (fun g0 g1 => nat_case nat g0 g1 0). + + +Definition pred (n:nat) := match n with O => O | S m => m end. + +Eval simpl in pred 56. + +Eval simpl in pred 0. + +Eval simpl in fun p => pred (S p). + + +Definition xorb (b1 b2:bool) := +match b1, b2 with + | false, true => true + | true, false => true + | _ , _ => false +end. + + + Definition pred_spec (n:nat) := {m:nat | n=0 /\ m=0 \/ n = S m}. + + + Definition predecessor : forall n:nat, pred_spec n. + intro n;case n. + unfold pred_spec;exists 0;auto. + unfold pred_spec; intro n0;exists n0; auto. + Defined. + +Print predecessor. + +Extraction predecessor. + +Theorem nat_expand : + forall n:nat, n = match n with 0 => 0 | S p => S p end. + intro n;case n;simpl;auto. +Qed. 
+ +Check (fun p:False => match p return 2=3 with end). + +Theorem fromFalse : False -> 0=1. + intro absurd. + contradiction. +Qed. + +Section equality_elimination. + Variables (A: Type) + (a b : A) + (p : a = b) + (Q : A -> Type). + Check (fun H : Q a => + match p in (eq _ y) return Q y with + refl_equal => H + end). + +End equality_elimination. + + +Theorem trans : forall n m p:nat, n=m -> m=p -> n=p. +Proof. + intros n m p eqnm. + case eqnm. + trivial. +Qed. + +Lemma Rw : forall x y: nat, y = y * x -> y * x * x = y. + intros x y e; do 2 rewrite <- e. + reflexivity. +Qed. + + +Require Import Arith. + +Check mult_1_l. +(* +mult_1_l + : forall n : nat, 1 * n = n +*) + +Check mult_plus_distr_r. +(* +mult_plus_distr_r + : forall n m p : nat, (n + m) * p = n * p + m * p + +*) + +Lemma mult_distr_S : forall n p : nat, n * p + p = (S n)* p. + simpl;auto with arith. +Qed. + +Lemma four_n : forall n:nat, n+n+n+n = 4*n. + intro n;rewrite <- (mult_1_l n). + + Undo. + intro n; pattern n at 1. + + + rewrite <- mult_1_l. + repeat rewrite mult_distr_S. + trivial. +Qed. + + +Section Le_case_analysis. + Variables (n p : nat) + (H : n <= p) + (Q : nat -> Prop) + (H0 : Q n) + (HS : forall m, n <= m -> Q (S m)). + Check ( + match H in (_ <= q) return (Q q) with + | le_n => H0 + | le_S m Hm => HS m Hm + end + ). + + +End Le_case_analysis. + + +Lemma predecessor_of_positive : forall n, 1 <= n -> exists p:nat, n = S p. +Proof. + intros n H; case H. + exists 0; trivial. + intros m Hm; exists m;trivial. +Qed. + +Definition Vtail_total + (A : Set) (n : nat) (v : vector A n) : vector A (pred n):= +match v in (vector _ n0) return (vector A (pred n0)) with +| Vnil => Vnil A +| Vcons _ n0 v0 => v0 +end. + +Definition Vtail' (A:Set)(n:nat)(v:vector A n) : vector A (pred n). + intros A n v; case v. + simpl. + exact (Vnil A). + simpl. + auto. +Defined. + +(* +Inductive Lambda : Set := + lambda : (Lambda -> False) -> Lambda. 
+ + +Error: Non strictly positive occurrence of "Lambda" in + "(Lambda -> False) -> Lambda" + +*) + +Section Paradox. + Variable Lambda : Set. + Variable lambda : (Lambda -> False) ->Lambda. + + Variable matchL : Lambda -> forall Q:Prop, ((Lambda ->False) -> Q) -> Q. + (* + understand matchL Q l (fun h : Lambda -> False => t) + + as match l return Q with lambda h => t end + *) + + Definition application (f x: Lambda) :False := + matchL f False (fun h => h x). + + Definition Delta : Lambda := lambda (fun x : Lambda => application x x). + + Definition loop : False := application Delta Delta. + + Theorem two_is_three : 2 = 3. + Proof. + elim loop. + Qed. + +End Paradox. + + +Require Import ZArith. + + + +Inductive itree : Set := +| ileaf : itree +| inode : Z-> (nat -> itree) -> itree. + +Definition isingle l := inode l (fun i => ileaf). + +Definition t1 := inode 0 (fun n => isingle (Z_of_nat (2*n))). + +Definition t2 := inode 0 + (fun n : nat => + inode (Z_of_nat n) + (fun p => isingle (Z_of_nat (n*p)))). + + +Inductive itree_le : itree-> itree -> Prop := + | le_leaf : forall t, itree_le ileaf t + | le_node : forall l l' s s', + Zle l l' -> + (forall i, exists j:nat, itree_le (s i) (s' j)) -> + itree_le (inode l s) (inode l' s'). + + +Theorem itree_le_trans : + forall t t', itree_le t t' -> + forall t'', itree_le t' t'' -> itree_le t t''. + induction t. + constructor 1. + + intros t'; case t'. + inversion 1. + intros z0 i0 H0. + intro t'';case t''. + inversion 1. + intros. + inversion_clear H1. + constructor 2. + inversion_clear H0;eauto with zarith. + inversion_clear H0. + intro i2; case (H4 i2). + intros. + generalize (H i2 _ H0). + intros. + case (H3 x);intros. + generalize (H5 _ H6). + exists x0;auto. +Qed. + + + +Inductive itree_le' : itree-> itree -> Prop := + | le_leaf' : forall t, itree_le' ileaf t + | le_node' : forall l l' s s' g, + Zle l l' -> + (forall i, itree_le' (s i) (s' (g i))) -> + itree_le' (inode l s) (inode l' s'). 
+ + + + + +Lemma t1_le_t2 : itree_le t1 t2. + unfold t1, t2. + constructor. + auto with zarith. + intro i; exists (2 * i). + unfold isingle. + constructor. + auto with zarith. + exists i;constructor. +Qed. + + + +Lemma t1_le'_t2 : itree_le' t1 t2. + unfold t1, t2. + constructor 2 with (fun i : nat => 2 * i). + auto with zarith. + unfold isingle; + intro i ; constructor 2 with (fun i :nat => i). + auto with zarith. + constructor . +Qed. + + +Require Import List. + +Inductive ltree (A:Set) : Set := + lnode : A -> list (ltree A) -> ltree A. + +Inductive prop : Prop := + prop_intro : Prop -> prop. + +Lemma prop_inject: prop. +Proof prop_intro prop. + + +Inductive ex_Prop (P : Prop -> Prop) : Prop := + exP_intro : forall X : Prop, P X -> ex_Prop P. + +Lemma ex_Prop_inhabitant : ex_Prop (fun P => P -> P). +Proof. + exists (ex_Prop (fun P => P -> P)). + trivial. +Qed. + + + + +(* + +Check (fun (P:Prop->Prop)(p: ex_Prop P) => + match p with exP_intro X HX => X end). +Error: +Incorrect elimination of "p" in the inductive type +"ex_Prop", the return type has sort "Type" while it should be +"Prop" + +Elimination of an inductive object of sort "Prop" +is not allowed on a predicate in sort "Type" +because proofs can be eliminated only to build proofs + +*) + +(* +Check (match prop_inject with (prop_intro P p) => P end). + +Error: +Incorrect elimination of "prop_inject" in the inductive type +"prop", the return type has sort "Type" while it should be +"Prop" + +Elimination of an inductive object of sort "Prop" +is not allowed on a predicate in sort "Type" +because proofs can be eliminated only to build proofs + +*) +Print prop_inject. + +(* +prop_inject = +prop_inject = prop_intro prop (fun H : prop => H) + : prop +*) + + +Inductive typ : Type := + typ_intro : Type -> typ. + +Definition typ_inject: typ. +split. +exact typ. +(* +Defined. + +Error: Universe Inconsistency. +*) +Abort. +(* + +Inductive aSet : Set := + aSet_intro: Set -> aSet. 
+ + +User error: Large non-propositional inductive types must be in Type + +*) + +Inductive ex_Set (P : Set -> Prop) : Type := + exS_intro : forall X : Set, P X -> ex_Set P. + + +Inductive comes_from_the_left (P Q:Prop): P \/ Q -> Prop := + c1 : forall p, comes_from_the_left P Q (or_introl (A:=P) Q p). + +Goal (comes_from_the_left _ _ (or_introl True I)). +split. +Qed. + +Goal ~(comes_from_the_left _ _ (or_intror True I)). + red;inversion 1. + (* discriminate H0. + *) +Abort. + +Reset comes_from_the_left. + +(* + + + + + + + Definition comes_from_the_left (P Q:Prop)(H:P \/ Q): Prop := + match H with + | or_introl p => True + | or_intror q => False + end. + +Error: +Incorrect elimination of "H" in the inductive type +"or", the return type has sort "Type" while it should be +"Prop" + +Elimination of an inductive object of sort "Prop" +is not allowed on a predicate in sort "Type" +because proofs can be eliminated only to build proofs + +*) + +Definition comes_from_the_left_sumbool + (P Q:Prop)(x:{P}+{Q}): Prop := + match x with + | left p => True + | right q => False + end. + + + + +Close Scope Z_scope. + + + + + +Theorem S_is_not_O : forall n, S n <> 0. + +Definition Is_zero (x:nat):= match x with + | 0 => True + | _ => False + end. + Lemma O_is_zero : forall m, m = 0 -> Is_zero m. + Proof. + intros m H; subst m. + (* + ============================ + Is_zero 0 + *) + simpl;trivial. + Qed. + + red; intros n Hn. + apply O_is_zero with (m := S n). + assumption. +Qed. + +Theorem disc2 : forall n, S (S n) <> 1. +Proof. + intros n Hn; discriminate. +Qed. + + +Theorem disc3 : forall n, S (S n) = 0 -> forall Q:Prop, Q. +Proof. + intros n Hn Q. + discriminate. +Qed. + + + +Theorem inj_succ : forall n m, S n = S m -> n = m. +Proof. + + +Lemma inj_pred : forall n m, n = m -> pred n = pred m. +Proof. + intros n m eq_n_m. + rewrite eq_n_m. + trivial. +Qed. + + intros n m eq_Sn_Sm. + apply inj_pred with (n:= S n) (m := S m); assumption. +Qed. 
+ +Lemma list_inject : forall (A:Set)(a b :A)(l l':list A), + a :: b :: l = b :: a :: l' -> a = b /\ l = l'. +Proof. + intros A a b l l' e. + injection e. + auto. +Qed. + + +Theorem not_le_Sn_0 : forall n:nat, ~ (S n <= 0). +Proof. + red; intros n H. + case H. +Undo. + +Lemma not_le_Sn_0_with_constraints : + forall n p , S n <= p -> p = 0 -> False. +Proof. + intros n p H; case H ; + intros; discriminate. +Qed. + +eapply not_le_Sn_0_with_constraints; eauto. +Qed. + + +Theorem not_le_Sn_0' : forall n:nat, ~ (S n <= 0). +Proof. + red; intros n H ; inversion H. +Qed. + +Derive Inversion le_Sn_0_inv with (forall n :nat, S n <= 0). +Check le_Sn_0_inv. + +Theorem le_Sn_0'' : forall n p : nat, ~ S n <= 0 . +Proof. + intros n p H; + inversion H using le_Sn_0_inv. +Qed. + +Derive Inversion_clear le_Sn_0_inv' with (forall n :nat, S n <= 0). +Check le_Sn_0_inv'. + + +Theorem le_reverse_rules : + forall n m:nat, n <= m -> + n = m \/ + exists p, n <= p /\ m = S p. +Proof. + intros n m H; inversion H. + left;trivial. + right; exists m0; split; trivial. +Restart. + intros n m H; inversion_clear H. + left;trivial. + right; exists m0; split; trivial. +Qed. + +Inductive ArithExp : Set := + Zero : ArithExp + | Succ : ArithExp -> ArithExp + | Plus : ArithExp -> ArithExp -> ArithExp. + +Inductive RewriteRel : ArithExp -> ArithExp -> Prop := + RewSucc : forall e1 e2 :ArithExp, + RewriteRel e1 e2 -> RewriteRel (Succ e1) (Succ e2) + | RewPlus0 : forall e:ArithExp, + RewriteRel (Plus Zero e) e + | RewPlusS : forall e1 e2:ArithExp, + RewriteRel e1 e2 -> + RewriteRel (Plus (Succ e1) e2) (Succ (Plus e1 e2)). + + + +Fixpoint plus (n p:nat) {struct n} : nat := + match n with + | 0 => p + | S m => S (plus m p) + end. + +Fixpoint plus' (n p:nat) {struct p} : nat := + match p with + | 0 => n + | S q => S (plus' n q) + end. + +Fixpoint plus'' (n p:nat) {struct n} : nat := + match n with + | 0 => p + | S m => plus'' m (S p) + end. 
+ + +Fixpoint even_test (n:nat) : bool := + match n + with 0 => true + | 1 => false + | S (S p) => even_test p + end. + + +Reset even_test. + +Fixpoint even_test (n:nat) : bool := + match n + with + | 0 => true + | S p => odd_test p + end +with odd_test (n:nat) : bool := + match n + with + | 0 => false + | S p => even_test p + end. + + + +Eval simpl in even_test. + + + +Eval simpl in (fun x : nat => even_test x). + +Eval simpl in (fun x : nat => plus 5 x). +Eval simpl in (fun x : nat => even_test (plus 5 x)). + +Eval simpl in (fun x : nat => even_test (plus x 5)). + + +Section Principle_of_Induction. +Variable P : nat -> Prop. +Hypothesis base_case : P 0. +Hypothesis inductive_step : forall n:nat, P n -> P (S n). +Fixpoint nat_ind (n:nat) : (P n) := + match n return P n with + | 0 => base_case + | S m => inductive_step m (nat_ind m) + end. + +End Principle_of_Induction. + +Scheme Even_induction := Minimality for even Sort Prop +with Odd_induction := Minimality for odd Sort Prop. + +Theorem even_plus_four : forall n:nat, even n -> even (4+n). +Proof. + intros n H. + elim H using Even_induction with (P0 := fun n => odd (4+n)); + simpl;repeat constructor;assumption. +Qed. + + +Section Principle_of_Double_Induction. +Variable P : nat -> nat ->Prop. +Hypothesis base_case1 : forall x:nat, P 0 x. +Hypothesis base_case2 : forall x:nat, P (S x) 0. +Hypothesis inductive_step : forall n m:nat, P n m -> P (S n) (S m). +Fixpoint nat_double_ind (n m:nat){struct n} : P n m := + match n, m return P n m with + | 0 , x => base_case1 x + | (S x), 0 => base_case2 x + | (S x), (S y) => inductive_step x y (nat_double_ind x y) + end. +End Principle_of_Double_Induction. + +Section Principle_of_Double_Recursion. +Variable P : nat -> nat -> Set. +Hypothesis base_case1 : forall x:nat, P 0 x. +Hypothesis base_case2 : forall x:nat, P (S x) 0. +Hypothesis inductive_step : forall n m:nat, P n m -> P (S n) (S m). 
+Fixpoint nat_double_rec (n m:nat){struct n} : P n m := + match n, m return P n m with + | 0 , x => base_case1 x + | (S x), 0 => base_case2 x + | (S x), (S y) => inductive_step x y (nat_double_rec x y) + end. +End Principle_of_Double_Recursion. + +Definition min : nat -> nat -> nat := + nat_double_rec (fun (x y:nat) => nat) + (fun (x:nat) => 0) + (fun (y:nat) => 0) + (fun (x y r:nat) => S r). + +Eval compute in (min 5 8). +Eval compute in (min 8 5). + + + +Lemma not_circular : forall n:nat, n <> S n. +Proof. + intro n. + apply nat_ind with (P:= fun n => n <> S n). + discriminate. + red; intros n0 Hn0 eqn0Sn0;injection eqn0Sn0;trivial. +Qed. + +Definition eq_nat_dec : forall n p:nat , {n=p}+{n <> p}. +Proof. + intros n p. + apply nat_double_rec with (P:= fun (n q:nat) => {q=p}+{q <> p}). +Undo. + pattern p,n. + elim n using nat_double_rec. + destruct x; auto. + destruct x; auto. + intros n0 m H; case H. + intro eq; rewrite eq ; auto. + intro neg; right; red ; injection 1; auto. +Defined. + +Definition eq_nat_dec' : forall n p:nat, {n=p}+{n <> p}. + decide equality. +Defined. + +Print Acc. + + +Require Import Minus. + +(* +Fixpoint div (x y:nat){struct x}: nat := + if eq_nat_dec x 0 + then 0 + else if eq_nat_dec y 0 + then x + else S (div (x-y) y). + +Error: +Recursive definition of div is ill-formed. +In environment +div : nat -> nat -> nat +x : nat +y : nat +_ : x <> 0 +_ : y <> 0 + +Recursive call to div has principal argument equal to +"x - y" +instead of a subterm of x + +*) + +Lemma minus_smaller_S: forall x y:nat, x - y < S x. +Proof. + intros x y; pattern y, x; + elim x using nat_double_ind. + destruct x0; auto with arith. + simpl; auto with arith. + simpl; auto with arith. +Qed. + +Lemma minus_smaller_positive : forall x y:nat, x <>0 -> y <> 0 -> + x - y < x. +Proof. + destruct x; destruct y; + ( simpl;intros; apply minus_smaller_S || + intros; absurd (0=0); auto). +Qed. 
+ +Definition minus_decrease : forall x y:nat, Acc lt x -> + x <> 0 -> + y <> 0 -> + Acc lt (x-y). +Proof. + intros x y H; case H. + intros Hz posz posy. + apply Hz; apply minus_smaller_positive; assumption. +Defined. + +Print minus_decrease. + + + +Definition div_aux (x y:nat)(H: Acc lt x):nat. + fix 3. + intros. + refine (if eq_nat_dec x 0 + then 0 + else if eq_nat_dec y 0 + then y + else div_aux (x-y) y _). + apply (minus_decrease x y H);assumption. +Defined. + + +Print div_aux. +(* +div_aux = +(fix div_aux (x y : nat) (H : Acc lt x) {struct H} : nat := + match eq_nat_dec x 0 with + | left _ => 0 + | right _ => + match eq_nat_dec y 0 with + | left _ => y + | right _0 => div_aux (x - y) y (minus_decrease x y H _ _0) + end + end) + : forall x : nat, nat -> Acc lt x -> nat +*) + +Require Import Wf_nat. +Definition div x y := div_aux x y (lt_wf x). + +Extraction div. +(* +let div x y = + div_aux x y +*) + +Extraction div_aux. + +(* +let rec div_aux x y = + match eq_nat_dec x O with + | Left -> O + | Right -> + (match eq_nat_dec y O with + | Left -> y + | Right -> div_aux (minus x y) y) +*) + +Lemma vector0_is_vnil : forall (A:Set)(v:vector A 0), v = Vnil A. +Proof. + intros A v;inversion v. +Abort. + +(* + Lemma vector0_is_vnil_aux : forall (A:Set)(n:nat)(v:vector A n), + n= 0 -> v = Vnil A. + +Toplevel input, characters 40281-40287 +> Lemma vector0_is_vnil_aux : forall (A:Set)(n:nat)(v:vector A n), n= 0 -> v = Vnil A. +> ^^^^^^ +Error: In environment +A : Set +n : nat +v : vector A n +e : n = 0 +The term "Vnil A" has type "vector A 0" while it is expected to have type + "vector A n" +*) + Require Import JMeq. + +Lemma vector0_is_vnil_aux : forall (A:Set)(n:nat)(v:vector A n), + n= 0 -> JMeq v (Vnil A). +Proof. + destruct v. + auto. + intro; discriminate. +Qed. + +Lemma vector0_is_vnil : forall (A:Set)(v:vector A 0), v = Vnil A. +Proof. + intros a v;apply JMeq_eq. + apply vector0_is_vnil_aux. + trivial. +Qed. + + +Implicit Arguments Vcons [A n]. 
+Implicit Arguments Vnil [A]. +Implicit Arguments Vhead [A n]. +Implicit Arguments Vtail [A n]. + +Definition Vid : forall (A : Set)(n:nat), vector A n -> vector A n. +Proof. + destruct n; intro v. + exact Vnil. + exact (Vcons (Vhead v) (Vtail v)). +Defined. + +Eval simpl in (fun (A:Set)(v:vector A 0) => (Vid _ _ v)). + +Eval simpl in (fun (A:Set)(v:vector A 0) => v). + + + +Lemma Vid_eq : forall (n:nat) (A:Set)(v:vector A n), v=(Vid _ n v). +Proof. + destruct v. + reflexivity. + reflexivity. +Defined. + +Theorem zero_nil : forall A (v:vector A 0), v = Vnil. +Proof. + intros. + change (Vnil (A:=A)) with (Vid _ 0 v). + apply Vid_eq. +Defined. + + +Theorem decomp : + forall (A : Set) (n : nat) (v : vector A (S n)), + v = Vcons (Vhead v) (Vtail v). +Proof. + intros. + change (Vcons (Vhead v) (Vtail v)) with (Vid _ (S n) v). + apply Vid_eq. +Defined. + + + +Definition vector_double_rect : + forall (A:Set) (P: forall (n:nat),(vector A n)->(vector A n) -> Type), + P 0 Vnil Vnil -> + (forall n (v1 v2 : vector A n) a b, P n v1 v2 -> + P (S n) (Vcons a v1) (Vcons b v2)) -> + forall n (v1 v2 : vector A n), P n v1 v2. + induction n. + intros; rewrite (zero_nil _ v1); rewrite (zero_nil _ v2). + auto. + intros v1 v2; rewrite (decomp _ _ v1);rewrite (decomp _ _ v2). + apply X0; auto. +Defined. + +Require Import Bool. + +Definition bitwise_or n v1 v2 : vector bool n := + vector_double_rect bool (fun n v1 v2 => vector bool n) + Vnil + (fun n v1 v2 a b r => Vcons (orb a b) r) n v1 v2. + + +Fixpoint vector_nth (A:Set)(n:nat)(p:nat)(v:vector A p){struct v} + : option A := + match n,v with + _ , Vnil => None + | 0 , Vcons b _ _ => Some b + | S n', Vcons _ p' v' => vector_nth A n' p' v' + end. + +Implicit Arguments vector_nth [A p]. + + +Lemma nth_bitwise : forall (n:nat) (v1 v2: vector bool n) i a b, + vector_nth i v1 = Some a -> + vector_nth i v2 = Some b -> + vector_nth i (bitwise_or _ v1 v2) = Some (orb a b). +Proof. + intros n v1 v2; pattern n,v1,v2. + apply vector_double_rect. 
+ simpl. + destruct i; discriminate 1. + destruct i; simpl;auto. + injection 1; injection 2;intros; subst a; subst b; auto. +Qed. + + Set Implicit Arguments. + + CoInductive Stream (A:Set) : Set := + | Cons : A -> Stream A -> Stream A. + + CoInductive LList (A: Set) : Set := + | LNil : LList A + | LCons : A -> LList A -> LList A. + + + + + + Definition head (A:Set)(s : Stream A) := match s with Cons a s' => a end. + + Definition tail (A : Set)(s : Stream A) := + match s with Cons a s' => s' end. + + CoFixpoint repeat (A:Set)(a:A) : Stream A := Cons a (repeat a). + + CoFixpoint iterate (A: Set)(f: A -> A)(a : A) : Stream A:= + Cons a (iterate f (f a)). + + CoFixpoint map (A B:Set)(f: A -> B)(s : Stream A) : Stream B:= + match s with Cons a tl => Cons (f a) (map f tl) end. + +Eval simpl in (fun (A:Set)(a:A) => repeat a). + +Eval simpl in (fun (A:Set)(a:A) => head (repeat a)). + + +CoInductive EqSt (A: Set) : Stream A -> Stream A -> Prop := + eqst : forall s1 s2: Stream A, + head s1 = head s2 -> + EqSt (tail s1) (tail s2) -> + EqSt s1 s2. + + +Section Parks_Principle. +Variable A : Set. +Variable R : Stream A -> Stream A -> Prop. +Hypothesis bisim1 : forall s1 s2:Stream A, R s1 s2 -> + head s1 = head s2. +Hypothesis bisim2 : forall s1 s2:Stream A, R s1 s2 -> + R (tail s1) (tail s2). + +CoFixpoint park_ppl : forall s1 s2:Stream A, R s1 s2 -> + EqSt s1 s2 := + fun s1 s2 (p : R s1 s2) => + eqst s1 s2 (bisim1 p) + (park_ppl (bisim2 p)). +End Parks_Principle. + + +Theorem map_iterate : forall (A:Set)(f:A->A)(x:A), + EqSt (iterate f (f x)) (map f (iterate f x)). +Proof. + intros A f x. + apply park_ppl with + (R:= fun s1 s2 => exists x: A, + s1 = iterate f (f x) /\ s2 = map f (iterate f x)). + + intros s1 s2 (x0,(eqs1,eqs2));rewrite eqs1;rewrite eqs2;reflexivity. + intros s1 s2 (x0,(eqs1,eqs2)). + exists (f x0);split;[rewrite eqs1|rewrite eqs2]; reflexivity. + exists x;split; reflexivity. +Qed. 
+ +Ltac infiniteproof f := + cofix f; constructor; [clear f| simpl; try (apply f; clear f)]. + + +Theorem map_iterate' : forall (A:Set)(f:A->A)(x:A), + EqSt (iterate f (f x)) (map f (iterate f x)). +infiniteproof map_iterate'. + reflexivity. +Qed. + + +Implicit Arguments LNil [A]. + +Lemma Lnil_not_Lcons : forall (A:Set)(a:A)(l:LList A), + LNil <> (LCons a l). + intros;discriminate. +Qed. + +Lemma injection_demo : forall (A:Set)(a b : A)(l l': LList A), + LCons a (LCons b l) = LCons b (LCons a l') -> + a = b /\ l = l'. +Proof. + intros A a b l l' e; injection e; auto. +Qed. + + +Inductive Finite (A:Set) : LList A -> Prop := +| Lnil_fin : Finite (LNil (A:=A)) +| Lcons_fin : forall a l, Finite l -> Finite (LCons a l). + +CoInductive Infinite (A:Set) : LList A -> Prop := +| LCons_inf : forall a l, Infinite l -> Infinite (LCons a l). + +Lemma LNil_not_Infinite : forall (A:Set), ~ Infinite (LNil (A:=A)). +Proof. + intros A H;inversion H. +Qed. + +Lemma Finite_not_Infinite : forall (A:Set)(l:LList A), + Finite l -> ~ Infinite l. +Proof. + intros A l H; elim H. + apply LNil_not_Infinite. + intros a l0 F0 I0' I1. + case I0'; inversion_clear I1. + trivial. +Qed. + +Lemma Not_Finite_Infinite : forall (A:Set)(l:LList A), + ~ Finite l -> Infinite l. +Proof. + cofix H. + destruct l. + intro; absurd (Finite (LNil (A:=A)));[auto|constructor]. + constructor. + apply H. + red; intro H1;case H0. + constructor. + trivial. +Qed. 
+ + + + diff --git a/doc/RecTutorial/coqartmacros.tex b/doc/RecTutorial/coqartmacros.tex new file mode 100644 index 00000000..6fb7534d --- /dev/null +++ b/doc/RecTutorial/coqartmacros.tex @@ -0,0 +1,180 @@ +\usepackage{url} + +\newcommand{\variantspringer}[1]{#1} +\newcommand{\marginok}[1]{\marginpar{\raggedright OK:#1}} +\newcommand{\tab}{{\null\hskip1cm}} +\newcommand{\Ltac}{\mbox{\emph{$\cal L$}tac}} +\newcommand{\coq}{\mbox{\emph{Coq}}} +\newcommand{\lcf}{\mbox{\emph{LCF}}} +\newcommand{\hol}{\mbox{\emph{HOL}}} +\newcommand{\pvs}{\mbox{\emph{PVS}}} +\newcommand{\isabelle}{\mbox{\emph{Isabelle}}} +\newcommand{\prolog}{\mbox{\emph{Prolog}}} +\newcommand{\goalbar}{\tt{}============================\it} +\newcommand{\gallina}{\mbox{\emph{Gallina}}} +\newcommand{\joker}{\texttt{\_}} +\newcommand{\eprime}{\(\e^{\prime}\)} +\newcommand{\Ztype}{\citecoq{Z}} +\newcommand{\propsort}{\citecoq{Prop}} +\newcommand{\setsort}{\citecoq{Set}} +\newcommand{\typesort}{\citecoq{Type}} +\newcommand{\ocaml}{\mbox{\emph{OCAML}}} +\newcommand{\haskell}{\mbox{\emph{Haskell}}} +\newcommand{\why}{\mbox{\emph{Why}}} +\newcommand{\Pascal}{\mbox{\emph{Pascal}}} + +\newcommand{\ml}{\mbox{\emph{ML}}} + +\newcommand{\scheme}{\mbox{\emph{Scheme}}} +\newcommand{\lisp}{\mbox{\emph{Lisp}}} + +\newcommand{\implarrow}{\mbox{$\Rightarrow$}} +\newcommand{\metavar}[1]{?#1} +\newcommand{\notincoq}[1]{#1} +\newcommand{\coqscope}[1]{\%#1} +\newcommand{\arrow}{\mbox{$\rightarrow$}} +\newcommand{\fleche}{\arrow} +\newcommand{\funarrow}{\mbox{$\Rightarrow$}} +\newcommand{\ltacarrow}{\funarrow} +\newcommand{\coqand}{\mbox{\(\wedge\)}} +\newcommand{\coqor}{\mbox{\(\vee\)}} +\newcommand{\coqnot}{\mbox{\(\neg\)}} +\newcommand{\hide}[1]{} +\newcommand{\hidedots}[1]{...} +\newcommand{\sig}[3]{\texttt{\{}#1\texttt{:}#2 \texttt{|} #3\texttt{\}}} +\renewcommand{\neg}{\sim} +\renewcommand{\marginpar}[1]{} + +\addtocounter{secnumdepth}{1} +\providecommand{\og}{«} +\providecommand{\fg}{»} + + 
+\newcommand{\hard}{\mbox{\small *}} +\newcommand{\xhard}{\mbox{\small **}} +\newcommand{\xxhard}{\mbox{\small ***}} + +%%% Operateurs, etc. +\newcommand{\impl}{\mbox{$\rightarrow$}} +\newcommand{\appli}[2]{\mbox{\tt{#1 #2}}} +\newcommand{\applis}[1]{\mbox{\texttt{#1}}} +\newcommand{\abst}[3]{\mbox{\tt{fun #1:#2 \funarrow #3}}} +\newcommand{\coqle}{\mbox{$\leq$}} +\newcommand{\coqge}{\mbox{$\geq$}} +\newcommand{\coqdiff}{\mbox{$\neq$}} +\newcommand{\coqiff}{\mbox{$\leftrightarrow$}} +\newcommand{\prodsym}{\mbox{\(\forall\,\)}} +\newcommand{\exsym}{\mbox{\(\exists\,\)}} + +\newcommand{\substsign}{/} +\newcommand{\subst}[3]{\mbox{#1\{#2\substsign{}#3\}}} +\newcommand{\anoabst}[2]{\mbox{\tt[#1]#2}} +\newcommand{\letin}[3]{\mbox{\tt let #1:=#2 in #3}} +\newcommand{\prodep}[3]{\mbox{\tt \(\forall\,\)#1:#2,$\,$#3}} +\newcommand{\prodplus}[2]{\mbox{\tt\(\forall\,\)$\,$#1,$\,$#2}} +\newcommand{\dom}[1]{\textrm{dom}(#1)} % domaine d'un contexte (log function) +\newcommand{\norm}[1]{\textrm{n}(#1)} % forme normale (log function) +\newcommand{\coqZ}[1]{\mbox{\tt{`#1`}}} +\newcommand{\coqnat}[1]{\mbox{\tt{#1}}} +\newcommand{\coqcart}[2]{\mbox{\tt{#1*#2}}} +\newcommand{\alphacong}{\mbox{$\,\cong_{\alpha}\,$}} % alpha-congruence +\newcommand{\betareduc}{\mbox{$\,\rightsquigarrow_{\!\beta}$}\,} % beta reduction +%\newcommand{\betastar}{\mbox{$\,\Rightarrow_{\!\beta}^{*}\,$}} % beta reduction +\newcommand{\deltareduc}{\mbox{$\,\rightsquigarrow_{\!\delta}$}\,} % delta reduction +\newcommand{\dbreduc}{\mbox{$\,\rightsquigarrow_{\!\delta\beta}$}\,} % delta,beta reduction +\newcommand{\ireduc}{\mbox{$\,\rightsquigarrow_{\!\iota}$}\,} % delta,beta reduction + + +% jugement de typage +\newcommand{\these}{\boldsymbol{\large \vdash}} +\newcommand{\disj}{\mbox{$\backslash/$}} +\newcommand{\conj}{\mbox{$/\backslash$}} +%\newcommand{\juge}[3]{\mbox{$#1 \boldsymbol{\vdash} #2 : #3 $}} +\newcommand{\juge}[4]{\mbox{$#1,#2 \these #3 \boldsymbol{:} #4 $}} +\newcommand{\smalljuge}[3]{\mbox{$#1 
\these #2 \boldsymbol{:} #3 $}} +\newcommand{\goal}[3]{\mbox{$#1,#2 \these^{\!\!\!?} #3 $}} +\newcommand{\sgoal}[2]{\mbox{$#1\these^{\!\!\!\!?} #2 $}} +\newcommand{\reduc}[5]{\mbox{$#1,#2 \these #3 \rhd_{#4}#5 $}} +\newcommand{\convert}[5]{\mbox{$#1,#2 \these #3 =_{#4}#5 $}} +\newcommand{\convorder}[5]{\mbox{$#1,#2 \these #3\leq _{#4}#5 $}} +\newcommand{\wouff}[2]{\mbox{$\emph{WF}(#1)[#2]$}} + + +%\newcommand{\mthese}{\underset{M}{\vdash}} +\newcommand{\mthese}{\boldsymbol{\vdash}_{\!\!M}} +\newcommand{\type}{\boldsymbol{:}} + +% jugement absolu + +%\newcommand{\ajuge}[2]{\mbox{$ \boldsymbol{\vdash} #1 : #2 $}} +\newcommand{\ajuge}[2]{\mbox{$\these #1 \boldsymbol{:} #2 $}} + +%%% logique minimale +\newcommand{\propzero}{\mbox{$P_0$}} % types de Fzero + +%%% logique propositionnelle classique +\newcommand {\ff}{\boldsymbol{f}} % faux +\newcommand {\vv}{\boldsymbol{t}} % vrai + +\newcommand{\verite}{\mbox{$\cal{B}$}} % {\ff,\vv} +\newcommand{\sequ}[2]{\mbox{$#1 \vdash #2 $}} % sequent +\newcommand{\strip}[1]{#1^o} % enlever les variables d'un contexte + + + +%%% tactiques +\newcommand{\decomp}{\delta} % decomposition +\newcommand{\recomp}{\rho} % recomposition + +%%% divers +\newcommand{\cqfd}{\mbox{\textbf{cqfd}}} +\newcommand{\fail}{\mbox{\textbf{F}}} +\newcommand{\succes}{\mbox{$\blacksquare$}} +%%% Environnements + + +%% Fzero +\newcommand{\con}{\mbox{$\cal C$}} +\newcommand{\var}{\mbox{$\cal V$}} + +\newcommand{\atomzero}{\mbox{${\cal A}_0$}} % types de base de Fzero +\newcommand{\typezero}{\mbox{${\cal T}_0$}} % types de Fzero +\newcommand{\termzero}{\mbox{$\Lambda_0$}} % termes de Fzero +\newcommand{\conzero}{\mbox{$\cal C_0$}} % contextes de Fzero + +\newcommand{\buts}{\mbox{$\cal B$}} % buts + +%%% for drawing terms +% abstraction [x:t]e +\newcommand{\PicAbst}[3]{\begin{bundle}{\bf abst}\chunk{#1}\chunk{#2}\chunk{#3}% + \end{bundle}} + +% the same in DeBruijn form +\newcommand{\PicDbj}[2]{\begin{bundle}{\bf abst}\chunk{#1}\chunk{#2} + \end{bundle}} + + +% 
applications +\newcommand{\PicAppl}[2]{\begin{bundle}{\bf appl}\chunk{#1}\chunk{#2}% + \end{bundle}} + +% variables +\newcommand{\PicVar}[1]{\begin{bundle}{\bf var}\chunk{#1} + \end{bundle}} + +% constantes +\newcommand{\PicCon}[1]{\begin{bundle}{\bf const}\chunk{#1}\end{bundle}} + +% arrows +\newcommand{\PicImpl}[2]{\begin{bundle}{\impl}\chunk{#1}\chunk{#2}% + \end{bundle}} + + + +%%%% scripts coq +\newcommand{\prompt}{\mbox{\sl Coq $<\;$}} +\newcommand{\natquicksort}{\texttt{nat\_quicksort}} +\newcommand{\citecoq}[1]{\mbox{\texttt{#1}}} +\newcommand{\safeit}{\it} +\newtheorem{remarque}{Remark}[section] +%\newtheorem{definition}{Definition}[chapter] diff --git a/doc/RecTutorial/manbiblio.bib b/doc/RecTutorial/manbiblio.bib new file mode 100644 index 00000000..099e3bbd --- /dev/null +++ b/doc/RecTutorial/manbiblio.bib @@ -0,0 +1,875 @@ + +@STRING{toappear="To appear"} +@STRING{lncs="Lecture Notes in Computer Science"} + +@TECHREPORT{RefManCoq, + AUTHOR = {Bruno~Barras, Samuel~Boutin, + Cristina~Cornes, Judicaël~Courant, Yann~Coscoy, David~Delahaye, + Daniel~de~Rauglaudre, Jean-Christophe~Filliâtre, Eduardo~Giménez, + Hugo~Herbelin, Gérard~Huet, Henri~Laulhère, César~Muñoz, + Chetan~Murthy, Catherine~Parent-Vigouroux, Patrick~Loiseleur, + Christine~Paulin-Mohring, Amokrane~Saïbi, Benjamin~Werner}, + INSTITUTION = {INRIA}, + TITLE = {{The Coq Proof Assistant Reference Manual -- Version V6.2}}, + YEAR = {1998} +} + +@INPROCEEDINGS{Aud91, + AUTHOR = {Ph. Audebaud}, + BOOKTITLE = {Proceedings of the sixth Conf. on Logic in Computer Science.}, + PUBLISHER = {IEEE}, + TITLE = {Partial {Objects} in the {Calculus of Constructions}}, + YEAR = {1991} +} + +@PHDTHESIS{Aud92, + AUTHOR = {Ph. Audebaud}, + SCHOOL = {{Universit\'e} Bordeaux I}, + TITLE = {Extension du Calcul des Constructions par Points fixes}, + YEAR = {1992} +} + +@INPROCEEDINGS{Audebaud92b, + AUTHOR = {Ph. 
Audebaud}, + BOOKTITLE = {{Proceedings of the 1992 Workshop on Types for Proofs and Programs}}, + EDITOR = {{B. Nordstr\"om and K. Petersson and G. Plotkin}}, + NOTE = {Also Research Report LIP-ENS-Lyon}, + PAGES = {pp 21--34}, + TITLE = {{CC+ : an extension of the Calculus of Constructions with fixpoints}}, + YEAR = {1992} +} + +@INPROCEEDINGS{Augustsson85, + AUTHOR = {L. Augustsson}, + TITLE = {{Compiling Pattern Matching}}, + BOOKTITLE = {Conference Functional Programming and +Computer Architecture}, + YEAR = {1985} +} + +@INPROCEEDINGS{EG94a, + AUTHOR = {E. Gim\'enez}, + EDITOR = {P. Dybjer and B. Nordstr\"om and J. Smith}, + BOOKTITLE = {Workshop on Types for Proofs and Programs}, + PAGES = {39-59}, + SERIES = {LNCS}, + NUMBER = {996}, + TITLE = {{Codifying guarded definitions with recursive schemes}}, + YEAR = {1994}, + PUBLISHER = {Springer-Verlag}, +} + +@INPROCEEDINGS{EG95a, + AUTHOR = {E. Gim\'enez}, + BOOKTITLE = {Workshop on Types for Proofs and Programs}, + SERIES = {LNCS}, + NUMBER = {1158}, + PAGES = {135-152}, + TITLE = {An application of co-Inductive types in Coq: + verification of the Alternating Bit Protocol}, + EDITOR = {S. Berardi and M. Coppo}, + PUBLISHER = {Springer-Verlag}, + YEAR = {1995} +} + +@PhdThesis{EG96, + author = {E. Gim\'enez}, + title = {A Calculus of Infinite Constructions and its + application to the verification of communicating systems}, + school = {Ecole Normale Sup\'erieure de Lyon}, + year = {1996} +} + +@ARTICLE{BaCo85, + AUTHOR = {J.L. Bates and R.L. Constable}, + JOURNAL = {ACM transactions on Programming Languages and Systems}, + TITLE = {Proofs as {Programs}}, + VOLUME = {7}, + YEAR = {1985} +} + +@BOOK{Bar81, + AUTHOR = {H.P. Barendregt}, + PUBLISHER = {North-Holland}, + TITLE = {The Lambda Calculus its Syntax and Semantics}, + YEAR = {1981} +} + +@TECHREPORT{Bar91, + AUTHOR = {H. 
Barendregt}, + INSTITUTION = {Catholic University Nijmegen}, + NOTE = {In Handbook of Logic in Computer Science, Vol II}, + NUMBER = {91-19}, + TITLE = {Lambda {Calculi with Types}}, + YEAR = {1991} +} + +@BOOK{Bastad92, + EDITOR = {B. Nordstr\"om and K. Petersson and G. Plotkin}, + PUBLISHER = {Available by ftp at site ftp.inria.fr}, + TITLE = {Proceedings of the 1992 Workshop on Types for Proofs and Programs}, + YEAR = {1992} +} + +@BOOK{Bee85, + AUTHOR = {M.J. Beeson}, + PUBLISHER = {Springer-Verlag}, + TITLE = {Foundations of Constructive Mathematics, Metamathematical Studies}, + YEAR = {1985} +} + +@ARTICLE{BeKe92, + AUTHOR = {G. Bellin and J. Ketonen}, + JOURNAL = {Theoretical Computer Science}, + PAGES = {115--142}, + TITLE = {A decision procedure revisited : Notes on direct logic, linear logic and its implementation}, + VOLUME = {95}, + YEAR = {1992} +} + +@BOOK{Bis67, + AUTHOR = {E. Bishop}, + PUBLISHER = {McGraw-Hill}, + TITLE = {Foundations of Constructive Analysis}, + YEAR = {1967} +} + +@BOOK{BoMo79, + AUTHOR = {R.S. Boyer and J.S. Moore}, + KEY = {BoMo79}, + PUBLISHER = {Academic Press}, + SERIES = {ACM Monograph}, + TITLE = {A computational logic}, + YEAR = {1979} +} + +@MASTERSTHESIS{Bou92, + AUTHOR = {S. Boutin}, + MONTH = sep, + SCHOOL = {{Universit\'e Paris 7}}, + TITLE = {Certification d'un compilateur {ML en Coq}}, + YEAR = {1992} +} + +@ARTICLE{Bru72, + AUTHOR = {N.J. de Bruijn}, + JOURNAL = {Indag. Math.}, + TITLE = {{Lambda-Calculus Notation with Nameless Dummies, a Tool for Automatic Formula Manipulation, with Application to the Church-Rosser Theorem}}, + VOLUME = {34}, + YEAR = {1972} +} + +@INCOLLECTION{Bru80, + AUTHOR = {N.J. de Bruijn}, + BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, + EDITOR = {J.P. Seldin and J.R. Hindley}, + PUBLISHER = {Academic Press}, + TITLE = {A survey of the project {Automath}}, + YEAR = {1980} +} + +@TECHREPORT{Leroy90, + AUTHOR = {X. 
Leroy}, + TITLE = {The {ZINC} experiment: an economical implementation +of the {ML} language}, + INSTITUTION = {INRIA}, + NUMBER = {117}, + YEAR = {1990} +} + +@BOOK{Caml, + AUTHOR = {P. Weis and X. Leroy}, + PUBLISHER = {InterEditions}, + TITLE = {Le langage Caml}, + YEAR = {1993} +} + +@TECHREPORT{CoC89, + AUTHOR = {Projet Formel}, + INSTITUTION = {INRIA}, + NUMBER = {110}, + TITLE = {{The Calculus of Constructions. Documentation and user's guide, Version 4.10}}, + YEAR = {1989} +} + +@INPROCEEDINGS{CoHu85a, + AUTHOR = {Th. Coquand and G. Huet}, + ADDRESS = {Linz}, + BOOKTITLE = {EUROCAL'85}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{Constructions : A Higher Order Proof System for Mechanizing Mathematics}}, + VOLUME = {203}, + YEAR = {1985} +} + +@Misc{Bar98, + author = {B. Barras}, + title = {A formalisation of + \uppercase{B}urali-\uppercase{F}orti's paradox in Coq}, + howpublished = {Distributed within the bunch of contribution to the + Coq system}, + year = {1998}, + month = {March}, + note = {\texttt{http://pauillac.inria.fr/coq}} +} + + +@INPROCEEDINGS{CoHu85b, + AUTHOR = {Th. Coquand and G. Huet}, + BOOKTITLE = {Logic Colloquium'85}, + EDITOR = {The Paris Logic Group}, + PUBLISHER = {North-Holland}, + TITLE = {{Concepts Math\'ematiques et Informatiques formalis\'es dans le Calcul des Constructions}}, + YEAR = {1987} +} + +@ARTICLE{CoHu86, + AUTHOR = {Th. Coquand and G. Huet}, + JOURNAL = {Information and Computation}, + NUMBER = {2/3}, + TITLE = {The {Calculus of Constructions}}, + VOLUME = {76}, + YEAR = {1988} +} + +@BOOK{Con86, + AUTHOR = {R.L. {Constable et al.}}, + PUBLISHER = {Prentice-Hall}, + TITLE = {{Implementing Mathematics with the Nuprl Proof Development System}}, + YEAR = {1986} +} + +@INPROCEEDINGS{CoPa89, + AUTHOR = {Th. Coquand and C. Paulin-Mohring}, + BOOKTITLE = {Proceedings of Colog'88}, + EDITOR = {P. Martin-L{\"o}f and G. 
Mints}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {Inductively defined types}, + VOLUME = {417}, + YEAR = {1990} +} + +@PHDTHESIS{Coq85, + AUTHOR = {Th. Coquand}, + MONTH = jan, + SCHOOL = {Universit\'e Paris~7}, + TITLE = {Une Th\'eorie des Constructions}, + YEAR = {1985} +} + +@INPROCEEDINGS{Coq86, + AUTHOR = {Th. Coquand}, + ADDRESS = {Cambridge, MA}, + BOOKTITLE = {Symposium on Logic in Computer Science}, + PUBLISHER = {IEEE Computer Society Press}, + TITLE = {{An Analysis of Girard's Paradox}}, + YEAR = {1986} +} + +@INPROCEEDINGS{Coq90, + AUTHOR = {Th. Coquand}, + BOOKTITLE = {Logic and Computer Science}, + EDITOR = {P. Oddifredi}, + NOTE = {INRIA Research Report 1088, also in~\cite{CoC89}}, + PUBLISHER = {Academic Press}, + TITLE = {{Metamathematical Investigations of a Calculus of Constructions}}, + YEAR = {1990} +} + +@INPROCEEDINGS{Coq92, + AUTHOR = {Th. Coquand}, + BOOKTITLE = {in \cite{Bastad92}}, + TITLE = {{Pattern Matching with Dependent Types}}, + YEAR = {1992}, + crossref = {Bastad92} +} + +@TECHREPORT{COQ93, + AUTHOR = {G. Dowek and A. Felty and H. Herbelin and G. Huet and C. Murthy and C. Parent and C. Paulin-Mohring and B. Werner}, + INSTITUTION = {INRIA}, + MONTH = may, + NUMBER = {154}, + TITLE = {{The Coq Proof Assistant User's Guide Version 5.8}}, + YEAR = {1993} +} + +@INPROCEEDINGS{Coquand93, + AUTHOR = {Th. Coquand}, + BOOKTITLE = {in \cite{Nijmegen93}}, + TITLE = {{Infinite Objects in Type Theory}}, + YEAR = {1993}, + crossref = {Nijmegen93} +} + +@MASTERSTHESIS{Cou94a, + AUTHOR = {J. Courant}, + MONTH = sep, + SCHOOL = {DEA d'Informatique, ENS Lyon}, + TITLE = {Explicitation de preuves par r\'ecurrence implicite}, + YEAR = {1994} +} + +@TECHREPORT{CPar93, + AUTHOR = {C. 
Parent}, + INSTITUTION = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, + MONTH = oct, + NOTE = {Also in~\cite{Nijmegen93}}, + NUMBER = {93-29}, + TITLE = {Developing certified programs in the system {Coq}- {The} {Program} tactic}, + YEAR = {1993} +} + +@PHDTHESIS{CPar95, + AUTHOR = {C. Parent}, + SCHOOL = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, + TITLE = {{Synth\`ese de preuves de programmes dans le Calcul des Constructions Inductives}}, + YEAR = {1995} +} + +@TECHREPORT{Dow90, + AUTHOR = {G. Dowek}, + INSTITUTION = {INRIA}, + NUMBER = {1283}, + TITLE = {{Naming and Scoping in a Mathematical Vernacular}}, + TYPE = {Research Report}, + YEAR = {1990} +} + +@ARTICLE{Dow91a, + AUTHOR = {G. Dowek}, + JOURNAL = {{Compte Rendu de l'Acad\'emie des Sciences}}, + NOTE = {(The undecidability of Third Order Pattern Matching in Calculi with Dependent Types or Type Constructors)}, + NUMBER = {12}, + PAGES = {951--956}, + TITLE = {{L'Ind\'ecidabilit\'e du Filtrage du Troisi\`eme Ordre dans les Calculs avec Types D\'ependants ou Constructeurs de Types}}, + VOLUME = {I, 312}, + YEAR = {1991} +} + +@INPROCEEDINGS{Dow91b, + AUTHOR = {G. Dowek}, + BOOKTITLE = {Proceedings of Mathematical Foundation of Computer Science}, + NOTE = {Also INRIA Research Report}, + PAGES = {151--160}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{A Second Order Pattern Matching Algorithm in the Cube of Typed {$\lambda$}-calculi}}, + VOLUME = {520}, + YEAR = {1991} +} + +@PHDTHESIS{Dow91c, + AUTHOR = {G. Dowek}, + MONTH = dec, + SCHOOL = {{Universit\'e Paris 7}}, + TITLE = {{D\'emonstration automatique dans le Calcul des Constructions}}, + YEAR = {1991} +} + +@ARTICLE{dowek93, + AUTHOR = {G. Dowek}, + TITLE = {{A Complete Proof Synthesis Method for the Cube of Type Systems}}, + JOURNAL = {Journal Logic Computation}, + VOLUME = {3}, + NUMBER = {3}, + PAGES = {287--315}, + MONTH = {June}, + YEAR = {1993} +} + +@UNPUBLISHED{Dow92a, + AUTHOR = {G. 
Dowek}, + NOTE = {To appear in Theoretical Computer Science}, + TITLE = {{The Undecidability of Pattern Matching in Calculi where Primitive Recursive Functions are Representable}}, + YEAR = {1992} +} + +@ARTICLE{Dow94a, + AUTHOR = {G. Dowek}, + JOURNAL = {Annals of Pure and Applied Logic}, + VOLUME = {69}, + PAGES = {135--155}, + TITLE = {Third order matching is decidable}, + YEAR = {1994} +} + +@INPROCEEDINGS{Dow94b, + AUTHOR = {G. Dowek}, + BOOKTITLE = {Proceedings of the second international conference on typed lambda calculus and applications}, + TITLE = {{Lambda-calculus, Combinators and the Comprehension Schema}}, + YEAR = {1995} +} + +@INPROCEEDINGS{Dyb91, + AUTHOR = {P. Dybjer}, + BOOKTITLE = {Logical Frameworks}, + EDITOR = {G. Huet and G. Plotkin}, + PAGES = {59--79}, + PUBLISHER = {Cambridge University Press}, + TITLE = {{Inductive sets and families in {Martin-L{\"o}f's Type Theory} and their set-theoretic semantics : An inversion principle for {Martin-L\"of's} type theory}}, + VOLUME = {14}, + YEAR = {1991} +} + +@ARTICLE{Dyc92, + AUTHOR = {Roy Dyckhoff}, + JOURNAL = {The Journal of Symbolic Logic}, + MONTH = sep, + NUMBER = {3}, + TITLE = {Contraction-free sequent calculi for intuitionistic logic}, + VOLUME = {57}, + YEAR = {1992} +} + +@MASTERSTHESIS{Fil94, + AUTHOR = {J.-C. Filli\^atre}, + MONTH = sep, + SCHOOL = {DEA d'Informatique, ENS Lyon}, + TITLE = {Une proc\'edure de d\'ecision pour le {C}alcul des {P}r\'edicats {D}irect. {E}tude et impl\'ementation dans le syst\`eme {C}oq}, + YEAR = {1994} +} + +@TECHREPORT{Filliatre95, + AUTHOR = {J.-C. Filli\^atre}, + INSTITUTION = {LIP-ENS-Lyon}, + TITLE = {{A decision procedure for Direct Predicate Calculus}}, + TYPE = {Research report}, + NUMBER = {96--25}, + YEAR = {1995} +} + +@UNPUBLISHED{Fle90, + AUTHOR = {E. 
Fleury}, + MONTH = jul, + NOTE = {Rapport de Stage}, + TITLE = {Implantation des algorithmes de {Floyd et de Dijkstra} dans le {Calcul des Constructions}}, + YEAR = {1990} +} + + +@TechReport{Gim98, + author = {E. Gim\'enez}, + title = {A Tutorial on Recursive Types in Coq}, + institution = {INRIA}, + year = {1998} +} + +@TECHREPORT{HKP97, + author = {G. Huet and G. Kahn and Ch. Paulin-Mohring}, + title = {The {Coq} Proof Assistant - A tutorial, Version 6.1}, + institution = {INRIA}, + type = {rapport technique}, + month = {Août}, + year = {1997}, + note = {Version révisée distribuée avec {Coq}}, + number = {204}, +} + +@INPROCEEDINGS{Gir70, + AUTHOR = {J.-Y. Girard}, + BOOKTITLE = {Proceedings of the 2nd Scandinavian Logic Symposium}, + PUBLISHER = {North-Holland}, + TITLE = {Une extension de l'interpr\'etation de {G\"odel} \`a l'analyse, et son application \`a l'\'elimination des coupures dans l'analyse et la th\'eorie des types}, + YEAR = {1970} +} + +@PHDTHESIS{Gir72, + AUTHOR = {J.-Y. Girard}, + SCHOOL = {Universit\'e Paris~7}, + TITLE = {Interpr\'etation fonctionnelle et \'elimination des coupures de l'arithm\'etique d'ordre sup\'erieur}, + YEAR = {1972} +} + +@BOOK{Gir89, + AUTHOR = {J.-Y. Girard and Y. Lafont and P. Taylor}, + PUBLISHER = {Cambridge University Press}, + SERIES = {Cambridge Tracts in Theoretical Computer Science 7}, + TITLE = {Proofs and Types}, + YEAR = {1989} +} + +@MASTERSTHESIS{Hir94, + AUTHOR = {D. Hirschkoff}, + MONTH = sep, + SCHOOL = {DEA IARFA, Ecole des Ponts et Chauss\'ees, Paris}, + TITLE = {{Ecriture d'une tactique arithm\'etique pour le syst\`eme Coq}}, + YEAR = {1994} +} + +@INCOLLECTION{How80, + AUTHOR = {W.A. Howard}, + BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, + EDITOR = {J.P. Seldin and J.R. 
Hindley}, + NOTE = {Unpublished 1969 Manuscript}, + PUBLISHER = {Academic Press}, + TITLE = {The Formulae-as-Types Notion of Constructions}, + YEAR = {1980} +} + +@INCOLLECTION{HuetLevy79, + AUTHOR = {G. Huet and J.-J. L\'{e}vy}, + TITLE = {Call by Need Computations in Non-Ambigous +Linear Term Rewriting Systems}, + NOTE = {Also research report 359, INRIA, 1979}, + BOOKTITLE = {Computational Logic, Essays in Honor of +Alan Robinson}, + EDITOR = {J.-L. Lassez and G. Plotkin}, + PUBLISHER = {The MIT press}, + YEAR = {1991} +} + +@INPROCEEDINGS{Hue87, + AUTHOR = {G. Huet}, + BOOKTITLE = {Programming of Future Generation Computers}, + EDITOR = {K. Fuchi and M. Nivat}, + NOTE = {Also in Proceedings of TAPSOFT87, LNCS 249, Springer-Verlag, 1987, pp 276--286}, + PUBLISHER = {Elsevier Science}, + TITLE = {Induction Principles Formalized in the {Calculus of Constructions}}, + YEAR = {1988} +} + +@INPROCEEDINGS{Hue88, + AUTHOR = {G. Huet}, + BOOKTITLE = {A perspective in Theoretical Computer Science. Commemorative Volume for Gift Siromoney}, + EDITOR = {R. Narasimhan}, + NOTE = {Also in~\cite{CoC89}}, + PUBLISHER = {World Scientific Publishing}, + TITLE = {{The Constructive Engine}}, + YEAR = {1989} +} + +@BOOK{Hue89, + EDITOR = {G. Huet}, + PUBLISHER = {Addison-Wesley}, + SERIES = {The UT Year of Programming Series}, + TITLE = {Logical Foundations of Functional Programming}, + YEAR = {1989} +} + +@INPROCEEDINGS{Hue92, + AUTHOR = {G. Huet}, + BOOKTITLE = {Proceedings of 12th FST/TCS Conference, New Delhi}, + PAGES = {229--240}, + PUBLISHER = {Springer Verlag}, + SERIES = {LNCS}, + TITLE = {{The Gallina Specification Language : A case study}}, + VOLUME = {652}, + YEAR = {1992} +} + +@ARTICLE{Hue94, + AUTHOR = {G. Huet}, + JOURNAL = {J. Functional Programming}, + PAGES = {371--394}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Residual theory in $\lambda$-calculus: a formal development}, + VOLUME = {4,3}, + YEAR = {1994} +} + +@ARTICLE{KeWe84, + AUTHOR = {J. 
Ketonen and R. Weyhrauch}, + JOURNAL = {Theoretical Computer Science}, + PAGES = {297--307}, + TITLE = {A decidable fragment of {P}redicate {C}alculus}, + VOLUME = {32}, + YEAR = {1984} +} + +@BOOK{Kle52, + AUTHOR = {S.C. Kleene}, + PUBLISHER = {North-Holland}, + SERIES = {Bibliotheca Mathematica}, + TITLE = {Introduction to Metamathematics}, + YEAR = {1952} +} + +@BOOK{Kri90, + AUTHOR = {J.-L. Krivine}, + PUBLISHER = {Masson}, + SERIES = {Etudes et recherche en informatique}, + TITLE = {Lambda-calcul {types et mod\`eles}}, + YEAR = {1990} +} + +@ARTICLE{Laville91, + AUTHOR = {A. Laville}, + TITLE = {Comparison of Priority Rules in Pattern +Matching and Term Rewriting}, + JOURNAL = {Journal of Symbolic Computation}, + VOLUME = {11}, + PAGES = {321--347}, + YEAR = {1991} +} + +@BOOK{LE92, + EDITOR = {G. Huet and G. Plotkin}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Logical Environments}, + YEAR = {1992} +} + +@INPROCEEDINGS{LePa94, + AUTHOR = {F. Leclerc and C. Paulin-Mohring}, + BOOKTITLE = {{Types for Proofs and Programs, Types' 93}}, + EDITOR = {H. Barendregt and T. Nipkow}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{Programming with Streams in Coq. A case study : The Sieve of Eratosthenes}}, + VOLUME = {806}, + YEAR = {1994} +} + +@BOOK{LF91, + EDITOR = {G. Huet and G. Plotkin}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Logical Frameworks}, + YEAR = {1991} +} + +@BOOK{MaL84, + AUTHOR = {{P. Martin-L\"of}}, + PUBLISHER = {Bibliopolis}, + SERIES = {Studies in Proof Theory}, + TITLE = {Intuitionistic Type Theory}, + YEAR = {1984} +} + +@INPROCEEDINGS{manoury94, + AUTHOR = {P. Manoury}, + TITLE = {{A User's Friendly Syntax to Define +Recursive Functions as Typed $\lambda-$Terms}}, + BOOKTITLE = {{Types for Proofs and Programs, TYPES'94}}, + SERIES = {LNCS}, + VOLUME = {996}, + MONTH = jun, + YEAR = {1994} +} + +@ARTICLE{MaSi94, + AUTHOR = {P. Manoury and M. 
Simonot}, + JOURNAL = {TCS}, + TITLE = {Automatizing termination proof of recursively defined function}, + YEAR = {To appear} +} + +@TECHREPORT{maranget94, + AUTHOR = {L. Maranget}, + INSTITUTION = {INRIA}, + NUMBER = {2385}, + TITLE = {{Two Techniques for Compiling Lazy Pattern Matching}}, + YEAR = {1994} +} + +@INPROCEEDINGS{Moh89a, + AUTHOR = {C. Paulin-Mohring}, + ADDRESS = {Austin}, + BOOKTITLE = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, + MONTH = jan, + PUBLISHER = {ACM}, + TITLE = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, + YEAR = {1989} +} + +@PHDTHESIS{Moh89b, + AUTHOR = {C. Paulin-Mohring}, + MONTH = jan, + SCHOOL = {{Universit\'e Paris 7}}, + TITLE = {Extraction de programmes dans le {Calcul des Constructions}}, + YEAR = {1989} +} + +@INPROCEEDINGS{Moh93, + AUTHOR = {C. Paulin-Mohring}, + BOOKTITLE = {Proceedings of the conference Typed Lambda Calculi and Applications}, + EDITOR = {M. Bezem and J.-F. Groote}, + NOTE = {Also LIP research report 92-49, ENS Lyon}, + NUMBER = {664}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{Inductive Definitions in the System Coq - Rules and Properties}}, + YEAR = {1993} +} + +@MASTERSTHESIS{Mun94, + AUTHOR = {C. Mu\~noz}, + MONTH = sep, + SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, + TITLE = {D\'emonstration automatique dans la logique propositionnelle intuitionniste}, + YEAR = {1994} +} + +@BOOK{Nijmegen93, + EDITOR = {H. Barendregt and T. Nipkow}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {Types for Proofs and Programs}, + VOLUME = {806}, + YEAR = {1994} +} + +@BOOK{NoPS90, + AUTHOR = {B. {Nordstr\"om} and K. Peterson and J. 
Smith}, + BOOKTITLE = {Information Processing 83}, + PUBLISHER = {Oxford Science Publications}, + SERIES = {International Series of Monographs on Computer Science}, + TITLE = {Programming in {Martin-L\"of's} Type Theory}, + YEAR = {1990} +} + +@ARTICLE{Nor88, + AUTHOR = {B. {Nordstr\"om}}, + JOURNAL = {BIT}, + TITLE = {Terminating General Recursion}, + VOLUME = {28}, + YEAR = {1988} +} + +@BOOK{Odi90, + EDITOR = {P. Odifreddi}, + PUBLISHER = {Academic Press}, + TITLE = {Logic and Computer Science}, + YEAR = {1990} +} + +@INPROCEEDINGS{PaMS92, + AUTHOR = {M. Parigot and P. Manoury and M. Simonot}, + ADDRESS = {St. Petersburg, Russia}, + BOOKTITLE = {Logic Programming and automated reasoning}, + EDITOR = {A. Voronkov}, + MONTH = jul, + NUMBER = {624}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{ProPre : A Programming language with proofs}}, + YEAR = {1992} +} + +@ARTICLE{Par92, + AUTHOR = {M. Parigot}, + JOURNAL = {Theoretical Computer Science}, + NUMBER = {2}, + PAGES = {335--356}, + TITLE = {{Recursive Programming with Proofs}}, + VOLUME = {94}, + YEAR = {1992} +} + +@INPROCEEDINGS{Parent95b, + AUTHOR = {C. Parent}, + BOOKTITLE = {{Mathematics of Program Construction'95}}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{Synthesizing proofs from programs in +the Calculus of Inductive Constructions}}, + VOLUME = {947}, + YEAR = {1995} +} + +@ARTICLE{PaWe92, + AUTHOR = {C. Paulin-Mohring and B. Werner}, + JOURNAL = {Journal of Symbolic Computation}, + PAGES = {607--640}, + TITLE = {{Synthesis of ML programs in the system Coq}}, + VOLUME = {15}, + YEAR = {1993} +} + +@INPROCEEDINGS{Prasad93, + AUTHOR = {K.V. Prasad}, + BOOKTITLE = {{Proceedings of CONCUR'93}}, + PUBLISHER = {Springer-Verlag}, + SERIES = {LNCS}, + TITLE = {{Programming with broadcasts}}, + VOLUME = {715}, + YEAR = {1993} +} + +@INPROCEEDINGS{puel-suarez90, + AUTHOR = {L.Puel and A. 
Su\'arez}, + BOOKTITLE = {{Conference Lisp and Functional Programming}}, + SERIES = {ACM}, + PUBLISHER = {Springer-Verlag}, + TITLE = {{Compiling Pattern Matching by Term +Decomposition}}, + YEAR = {1990} +} + +@UNPUBLISHED{Rou92, + AUTHOR = {J. Rouyer}, + MONTH = aug, + NOTE = {To appear as a technical report}, + TITLE = {{D\'eveloppement de l'Algorithme d'Unification dans le Calcul des Constructions}}, + YEAR = {1992} +} + +@TECHREPORT{Saibi94, + AUTHOR = {A. Sa\"{\i}bi}, + INSTITUTION = {INRIA}, + MONTH = dec, + NUMBER = {2345}, + TITLE = {{Axiomatization of a lambda-calculus with explicit-substitutions in the Coq System}}, + YEAR = {1994} +} + +@MASTERSTHESIS{saidi94, + AUTHOR = {H. Saidi}, + MONTH = sep, + SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, + TITLE = {R\'esolution d'\'equations dans le syst\`eme T + de G\"odel}, + YEAR = {1994} +} + +@MASTERSTHESIS{Ter92, + AUTHOR = {D. Terrasse}, + MONTH = sep, + SCHOOL = {IARFA}, + TITLE = {{Traduction de TYPOL en COQ. Application \`a Mini ML}}, + YEAR = {1992} +} + +@TECHREPORT{ThBeKa92, + AUTHOR = {L. Th\'ery and Y. Bertot and G. Kahn}, + INSTITUTION = {INRIA Sophia}, + MONTH = may, + NUMBER = {1684}, + TITLE = {Real theorem provers deserve real user-interfaces}, + TYPE = {Research Report}, + YEAR = {1992} +} + +@BOOK{TrDa89, + AUTHOR = {A.S. Troelstra and D. van Dalen}, + PUBLISHER = {North-Holland}, + SERIES = {Studies in Logic and the foundations of Mathematics, volumes 121 and 123}, + TITLE = {Constructivism in Mathematics, an introduction}, + YEAR = {1988} +} + +@INCOLLECTION{wadler87, + AUTHOR = {P. Wadler}, + TITLE = {Efficient Compilation of Pattern Matching}, + BOOKTITLE = {The Implementation of Functional Programming +Languages}, + EDITOR = {S.L. Peyton Jones}, + PUBLISHER = {Prentice-Hall}, + YEAR = {1987} +} + +@PHDTHESIS{Wer94, + AUTHOR = {B. 
Werner}, + SCHOOL = {Universit\'e Paris 7}, + TITLE = {Une th\'eorie des constructions inductives}, + TYPE = {Th\`ese de Doctorat}, + YEAR = {1994} +} + + diff --git a/doc/RecTutorial/morebib.bib b/doc/RecTutorial/morebib.bib new file mode 100644 index 00000000..11dde2cd --- /dev/null +++ b/doc/RecTutorial/morebib.bib @@ -0,0 +1,55 @@ +@book{coqart, + title = "Interactive Theorem Proving and Program Development. + Coq'Art: The Calculus of Inductive Constructions", + author = "Yves Bertot and Pierre Castéran", + publisher = "Springer Verlag", + series = "Texts in Theoretical Computer Science. An EATCS series", + year = 2004 +} + +@Article{Coquand:Huet, + author = {Thierry Coquand and Gérard Huet}, + title = {The Calculus of Constructions}, + journal = {Information and Computation}, + year = {1988}, + volume = {76}, +} + +@INcollection{Coquand:metamathematical, + author = "Thierry Coquand", + title = "Metamathematical Investigations on a Calculus of Constructions", + booktitle="Logic and Computer Science", + year = {1990}, + editor="P. Odifreddi", + publisher = "Academic Press", +} + +@Misc{coqrefman, + title = {The {C}oq reference manual}, + author={{C}oq {D}evelopment Team}, + note= {LogiCal Project, \texttt{http://coq.inria.fr/}} + } + +@Misc{coqsite, + author= {{C}oq {D}evelopment Team}, + title = {The \emph{Coq} proof assistant}, + note = {Documentation, system download. 
{C}ontact: \texttt{http://coq.inria.fr/}} +} + + + +@Misc{Booksite, + author = {Yves Bertot and Pierre Cast\'eran}, + title = {Coq'{A}rt: examples and exercises}, + note = {\url{http://www.labri.fr/Perso/~casteran/CoqArt}} +} + + +@InProceedings{conor:motive, + author ="Conor McBride", + title = "Elimination with a motive", + booktitle = "Types for Proofs and Programs'2000", + volume = 2277, + pages = "197-217", + year = "2002", +} diff --git a/doc/RecTutorial/recmacros.tex b/doc/RecTutorial/recmacros.tex new file mode 100644 index 00000000..0334553f --- /dev/null +++ b/doc/RecTutorial/recmacros.tex @@ -0,0 +1,75 @@ +%=================================== +% Style of the document +%=================================== +%\newtheorem{example}{Example}[section] +%\newtheorem{exercise}{Exercise}[section] + + +\newcommand{\comentario}[1]{\texttt{#1}} + +%=================================== +% Keywords +%=================================== + +\newcommand{\Prop}{\texttt{Prop}} +\newcommand{\Set}{\texttt{Set}} +\newcommand{\Type}{\texttt{Type}} +\newcommand{\true}{\texttt{true}} +\newcommand{\false}{\texttt{false}} +\newcommand{\Lth}{\texttt{Lth}} + +\newcommand{\Nat}{\texttt{nat}} +\newcommand{\nat}{\texttt{nat}} +\newcommand{\Z} {\texttt{O}} +\newcommand{\SUCC}{\texttt{S}} +\newcommand{\pred}{\texttt{pred}} + +\newcommand{\False}{\texttt{False}} +\newcommand{\True}{\texttt{True}} +\newcommand{\I}{\texttt{I}} + +\newcommand{\natind}{\texttt{nat\_ind}} +\newcommand{\natrec}{\texttt{nat\_rec}} +\newcommand{\natrect}{\texttt{nat\_rect}} + +\newcommand{\eqT}{\texttt{eqT}} +\newcommand{\identityT}{\texttt{identityT}} + +\newcommand{\map}{\texttt{map}} +\newcommand{\iterates}{\texttt{iterates}} + + +%=================================== +% Numbering +%=================================== + + +\newtheorem{definition}{Definition}[section] +\newtheorem{example}{Example}[section] + + +%=================================== +% Judgements +%=================================== + + 
+\newcommand{\JM}[2]{\ensuremath{#1 : #2}} + +%=================================== +% Expressions +%=================================== + +\newcommand{\Case}[3][]{\ensuremath{#1\textsf{Case}~#2~\textsf of}~#3~\textsf{end}} + +%======================================= + +\newcommand{\snreglados} [3] {\begin{tabular}{c} \ensuremath{#1} \\[2pt] + \ensuremath{#2}\\ \hline \ensuremath{#3} \end{tabular}} + + +\newcommand{\snregla} [2] {\begin{tabular}{c} + \ensuremath{#1}\\ \hline \ensuremath{#2} \end{tabular}} + + +%======================================= + diff --git a/doc/common/macros.tex b/doc/common/macros.tex new file mode 100755 index 00000000..393b8547 --- /dev/null +++ b/doc/common/macros.tex @@ -0,0 +1,497 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% MACROS FOR THE REFERENCE MANUAL OF COQ % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% For commentaries (define \com as {} for the release manual) +%\newcommand{\com}[1]{{\it(* #1 *)}} +%\newcommand{\com}[1]{} + +%%OPTIONS for HACHA +%\renewcommand{\cuttingunit}{section} + + +%BEGIN LATEX +\newenvironment{centerframe}% +{\bgroup +\dimen0=\textwidth +\advance\dimen0 by -2\fboxrule +\advance\dimen0 by -2\fboxsep +\setbox0=\hbox\bgroup +\begin{minipage}{\dimen0}% +\begin{center}}% +{\end{center}% +\end{minipage}\egroup +\centerline{\fbox{\box0}}\egroup +} +%END LATEX +%HEVEA \newenvironment{centerframe}{\begin{center}}{\end{center}} + +%HEVEA \newcommand{\vec}[1]{\mathbf{#1}} +%HEVEA \newcommand{\ominus}{-} +%HEVEA \renewcommand{\oplus}{+} +%HEVEA \renewcommand{\otimes}{\times} +%HEVEA \newcommand{\land}{\wedge} +%HEVEA \newcommand{\lor}{\vee} +%HEVEA \newcommand{\k}[1]{#1} +%HEVEA \newcommand{\phantom}[1]{\qquad} + +%%%%%%%%%%%%%%%%%%%%%%% +% Formatting commands % +%%%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\ErrMsg}{\medskip \noindent {\bf Error message: }} +\newcommand{\ErrMsgx}{\medskip \noindent {\bf Error messages: }} +\newcommand{\variant}{\medskip \noindent {\bf Variant: }} +\newcommand{\variants}{\medskip 
\noindent {\bf Variants: }} +\newcommand{\SeeAlso}{\medskip \noindent {\bf See also: }} +\newcommand{\Rem}{\medskip \noindent {\bf Remark: }} +\newcommand{\Rems}{\medskip \noindent {\bf Remarks: }} +\newcommand{\Example}{\medskip \noindent {\bf Example: }} +\newcommand{\Warning}{\medskip \noindent {\bf Warning: }} +\newcommand{\Warns}{\medskip \noindent {\bf Warnings: }} +\newcounter{ex} +\newcommand{\firstexample}{\setcounter{ex}{1}} +\newcommand{\example}[1]{ +\medskip \noindent \textbf{Example \arabic{ex}: }\textit{#1} +\addtocounter{ex}{1}} + +\newenvironment{Variant}{\variant\begin{enumerate}}{\end{enumerate}} +\newenvironment{Variants}{\variants\begin{enumerate}}{\end{enumerate}} +\newenvironment{ErrMsgs}{\ErrMsgx\begin{enumerate}}{\end{enumerate}} +\newenvironment{Remarks}{\Rems\begin{enumerate}}{\end{enumerate}} +\newenvironment{Warnings}{\Warns\begin{enumerate}}{\end{enumerate}} +\newenvironment{Examples}{\medskip\noindent{\bf Examples:} +\begin{enumerate}}{\end{enumerate}} + +%\newcommand{\bd}{\noindent\bf} +%\newcommand{\sbd}{\vspace{8pt}\noindent\bf} +%\newcommand{\sdoll}[1]{\begin{small}$ #1~ $\end{small}} +%\newcommand{\sdollnb}[1]{\begin{small}$ #1 $\end{small}} +\newcommand{\kw}[1]{\textsf{#1}} +%\newcommand{\spec}[1]{\{\,#1\,\}} + +% Building regular expressions +\newcommand{\zeroone}[1]{{\sl [}#1{\sl ]}} +%\newcommand{\zeroonemany}[1]{$\{$#1$\}$*} +%\newcommand{\onemany}[1]{$\{$#1$\}$+} +\newcommand{\nelist}[2]{{#1} {\tt #2} {\ldots} {\tt #2} {#1}} +\newcommand{\sequence}[2]{{\sl [}{#1} {\tt #2} {\ldots} {\tt #2} {#1}{\sl ]}} +\newcommand{\nelistwithoutblank}[2]{#1{\tt #2}\ldots{\tt #2}#1} +\newcommand{\sequencewithoutblank}[2]{$[$#1{\tt #2}\ldots{\tt #2}#1$]$} + +% Used for RefMan-gal +%\newcommand{\ml}[1]{\hbox{\tt{#1}}} +%\newcommand{\op}{\,|\,} + +%%%%%%%%%%%%%%%%%%%%%%%% +% Trademarks and so on % +%%%%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\Coq}{\textsc{Coq}} +\newcommand{\gallina}{\textsc{Gallina}} +\newcommand{\Gallina}{\textsc{Gallina}} 
+\newcommand{\CoqIDE}{\textsc{CoqIDE}} +\newcommand{\ocaml}{\textsc{Objective Caml}} +\newcommand{\camlpppp}{\textsc{Camlp4}} +\newcommand{\emacs}{\textsc{GNU Emacs}} +\newcommand{\CIC}{\pCIC} +\newcommand{\pCIC}{p\textsc{Cic}} +\newcommand{\iCIC}{\textsc{Cic}} +\newcommand{\FW}{\ensuremath{F_{\omega}}} +%\newcommand{\bn}{{\sf BNF}} + +%%%%%%%%%%%%%%%%%%% +% Name of tactics % +%%%%%%%%%%%%%%%%%%% + +%\newcommand{\Natural}{\mbox{\tt Natural}} + +%%%%%%%%%%%%%%%%% +% \rm\sl series % +%%%%%%%%%%%%%%%%% + +\newcommand{\nterm}[1]{\textrm{\textsl{#1}}} + +\newcommand{\qstring}{\nterm{string}} + +%% New syntax specific entries +\newcommand{\annotation}{\nterm{annotation}} +\newcommand{\assums}{\nterm{assums}} % vernac +\newcommand{\simpleassums}{\nterm{simple\_assums}} % assumptions +\newcommand{\binder}{\nterm{binder}} +\newcommand{\binderlet}{\nterm{binderlet}} +\newcommand{\binderlist}{\nterm{binderlist}} +\newcommand{\caseitems}{\nterm{match\_items}} +\newcommand{\caseitem}{\nterm{match\_item}} +\newcommand{\eqn}{\nterm{equation}} +\newcommand{\ifitem}{\nterm{dep\_ret\_type}} +\newcommand{\letclauses}{\nterm{letclauses}} +\newcommand{\params}{\nterm{params}} % vernac +\newcommand{\returntype}{\nterm{return\_type}} +\newcommand{\idparams}{\nterm{ident\_with\_params}} +\newcommand{\statkwd}{\nterm{statement\_keyword}} % vernac +\newcommand{\termarg}{\nterm{arg}} + +\newcommand{\typecstr}{\zeroone{{\tt :} {\term}}} + + +\newcommand{\Fwterm}{\textrm{\textsl{Fwterm}}} +\newcommand{\Index}{\textrm{\textsl{index}}} +\newcommand{\abbrev}{\textrm{\textsl{abbreviation}}} +\newcommand{\atomictac}{\textrm{\textsl{atomic\_tactic}}} +\newcommand{\bindinglist}{\textrm{\textsl{bindings\_list}}} +\newcommand{\cast}{\textrm{\textsl{cast}}} +\newcommand{\cofixpointbodies}{\textrm{\textsl{cofix\_bodies}}} +\newcommand{\cofixpointbody}{\textrm{\textsl{cofix\_body}}} +\newcommand{\commandtac}{\textrm{\textsl{tactic\_invocation}}} +\newcommand{\constructor}{\textrm{\textsl{constructor}}} 
+\newcommand{\convtactic}{\textrm{\textsl{conv\_tactic}}} +\newcommand{\declarationkeyword}{\textrm{\textsl{declaration\_keyword}}} +\newcommand{\declaration}{\textrm{\textsl{declaration}}} +\newcommand{\definition}{\textrm{\textsl{definition}}} +\newcommand{\digit}{\textrm{\textsl{digit}}} +\newcommand{\exteqn}{\textrm{\textsl{ext\_eqn}}} +\newcommand{\field}{\textrm{\textsl{field}}} +\newcommand{\firstletter}{\textrm{\textsl{first\_letter}}} +\newcommand{\fixpg}{\textrm{\textsl{fix\_pgm}}} +\newcommand{\fixpointbodies}{\textrm{\textsl{fix\_bodies}}} +\newcommand{\fixpointbody}{\textrm{\textsl{fix\_body}}} +\newcommand{\fixpoint}{\textrm{\textsl{fixpoint}}} +\newcommand{\flag}{\textrm{\textsl{flag}}} +\newcommand{\form}{\textrm{\textsl{form}}} +\newcommand{\entry}{\textrm{\textsl{entry}}} +\newcommand{\proditem}{\textrm{\textsl{production\_item}}} +\newcommand{\tacargtype}{\textrm{\textsl{tactic\_argument\_type}}} +\newcommand{\scope}{\textrm{\textsl{scope}}} +\newcommand{\optscope}{\textrm{\textsl{opt\_scope}}} +\newcommand{\declnotation}{\textrm{\textsl{decl\_notation}}} +\newcommand{\symbolentry}{\textrm{\textsl{symbol}}} +\newcommand{\modifiers}{\textrm{\textsl{modifiers}}} +\newcommand{\localdef}{\textrm{\textsl{local\_def}}} +\newcommand{\localdecls}{\textrm{\textsl{local\_decls}}} +\newcommand{\ident}{\textrm{\textsl{ident}}} +\newcommand{\accessident}{\textrm{\textsl{access\_ident}}} +\newcommand{\inductivebody}{\textrm{\textsl{ind\_body}}} +\newcommand{\inductive}{\textrm{\textsl{inductive}}} +\newcommand{\naturalnumber}{\textrm{\textsl{natural}}} +\newcommand{\integer}{\textrm{\textsl{integer}}} +\newcommand{\multpattern}{\textrm{\textsl{mult\_pattern}}} +\newcommand{\mutualcoinductive}{\textrm{\textsl{mutual\_coinductive}}} +\newcommand{\mutualinductive}{\textrm{\textsl{mutual\_inductive}}} +\newcommand{\nestedpattern}{\textrm{\textsl{nested\_pattern}}} +\newcommand{\name}{\textrm{\textsl{name}}} +\newcommand{\num}{\textrm{\textsl{num}}} 
+\newcommand{\pattern}{\textrm{\textsl{pattern}}} +\newcommand{\intropattern}{\textrm{\textsl{intro\_pattern}}} +\newcommand{\pat}{\textrm{\textsl{pat}}} +\newcommand{\pgs}{\textrm{\textsl{pgms}}} +\newcommand{\pg}{\textrm{\textsl{pgm}}} +%BEGIN LATEX +\newcommand{\proof}{\textrm{\textsl{proof}}} +%END LATEX +%HEVEA \renewcommand{\proof}{\textrm{\textsl{proof}}} +\newcommand{\record}{\textrm{\textsl{record}}} +\newcommand{\rewrule}{\textrm{\textsl{rewriting\_rule}}} +\newcommand{\sentence}{\textrm{\textsl{sentence}}} +\newcommand{\simplepattern}{\textrm{\textsl{simple\_pattern}}} +\newcommand{\sort}{\textrm{\textsl{sort}}} +\newcommand{\specif}{\textrm{\textsl{specif}}} +\newcommand{\statement}{\textrm{\textsl{statement}}} +\newcommand{\str}{\textrm{\textsl{string}}} +\newcommand{\subsequentletter}{\textrm{\textsl{subsequent\_letter}}} +\newcommand{\switch}{\textrm{\textsl{switch}}} +\newcommand{\tac}{\textrm{\textsl{tactic}}} +\newcommand{\terms}{\textrm{\textsl{terms}}} +\newcommand{\term}{\textrm{\textsl{term}}} +\newcommand{\module}{\textrm{\textsl{module}}} +\newcommand{\modexpr}{\textrm{\textsl{module\_expression}}} +\newcommand{\modtype}{\textrm{\textsl{module\_type}}} +\newcommand{\onemodbinding}{\textrm{\textsl{module\_binding}}} +\newcommand{\modbindings}{\textrm{\textsl{module\_bindings}}} +\newcommand{\qualid}{\textrm{\textsl{qualid}}} +\newcommand{\class}{\textrm{\textsl{class}}} +\newcommand{\dirpath}{\textrm{\textsl{dirpath}}} +\newcommand{\typedidents}{\textrm{\textsl{typed\_idents}}} +\newcommand{\type}{\textrm{\textsl{type}}} +\newcommand{\vref}{\textrm{\textsl{ref}}} +\newcommand{\zarithformula}{\textrm{\textsl{zarith\_formula}}} +\newcommand{\zarith}{\textrm{\textsl{zarith}}} +\newcommand{\ltac}{\mbox{${\cal L}_{tac}$}} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% \mbox{\sf } series for roman text in maths formulas % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\alors}{\mbox{\textsf{then}}} 
+\newcommand{\alter}{\mbox{\textsf{alter}}} +\newcommand{\bool}{\mbox{\textsf{bool}}} +\newcommand{\conc}{\mbox{\textsf{conc}}} +\newcommand{\cons}{\mbox{\textsf{cons}}} +\newcommand{\consf}{\mbox{\textsf{consf}}} +\newcommand{\emptyf}{\mbox{\textsf{emptyf}}} +\newcommand{\EqSt}{\mbox{\textsf{EqSt}}} +\newcommand{\false}{\mbox{\textsf{false}}} +\newcommand{\filter}{\mbox{\textsf{filter}}} +\newcommand{\forest}{\mbox{\textsf{forest}}} +\newcommand{\from}{\mbox{\textsf{from}}} +\newcommand{\hd}{\mbox{\textsf{hd}}} +\newcommand{\Length}{\mbox{\textsf{Length}}} +\newcommand{\length}{\mbox{\textsf{length}}} +\newcommand{\LengthA}{\mbox {\textsf{Length\_A}}} +\newcommand{\List}{\mbox{\textsf{List}}} +\newcommand{\ListA}{\mbox{\textsf{List\_A}}} +\newcommand{\LNil}{\mbox{\textsf{Lnil}}} +\newcommand{\LCons}{\mbox{\textsf{Lcons}}} +\newcommand{\nat}{\mbox{\textsf{nat}}} +\newcommand{\nO}{\mbox{\textsf{O}}} +\newcommand{\nS}{\mbox{\textsf{S}}} +\newcommand{\node}{\mbox{\textsf{node}}} +\newcommand{\Nil}{\mbox{\textsf{nil}}} +\newcommand{\Prop}{\mbox{\textsf{Prop}}} +\newcommand{\Set}{\mbox{\textsf{Set}}} +\newcommand{\si}{\mbox{\textsf{if}}} +\newcommand{\sinon}{\mbox{\textsf{else}}} +\newcommand{\Str}{\mbox{\textsf{Stream}}} +\newcommand{\tl}{\mbox{\textsf{tl}}} +\newcommand{\tree}{\mbox{\textsf{tree}}} +\newcommand{\true}{\mbox{\textsf{true}}} +\newcommand{\Type}{\mbox{\textsf{Type}}} +\newcommand{\unfold}{\mbox{\textsf{unfold}}} +\newcommand{\zeros}{\mbox{\textsf{zeros}}} + +%%%%%%%%% +% Misc. 
% +%%%%%%%%% +\newcommand{\T}{\texttt{T}} +\newcommand{\U}{\texttt{U}} +\newcommand{\real}{\textsf{Real}} +\newcommand{\Spec}{\textit{Spec}} +\newcommand{\Data}{\textit{Data}} +\newcommand{\In} {{\textbf{in }}} +\newcommand{\AND} {{\textbf{and}}} +\newcommand{\If}{{\textbf{if }}} +\newcommand{\Else}{{\textbf{else }}} +\newcommand{\Then} {{\textbf{then }}} +\newcommand{\Let}{{\textbf{let }}} +\newcommand{\Where}{{\textbf{where rec }}} +\newcommand{\Function}{{\textbf{function }}} +\newcommand{\Rec}{{\textbf{rec }}} +%\newcommand{\cn}{\centering} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Math commands and symbols % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\la}{\leftarrow} +\newcommand{\ra}{\rightarrow} +\newcommand{\Ra}{\Rightarrow} +\newcommand{\rt}{\Rightarrow} +\newcommand{\lla}{\longleftarrow} +\newcommand{\lra}{\longrightarrow} +\newcommand{\Llra}{\Longleftrightarrow} +\newcommand{\mt}{\mapsto} +\newcommand{\ov}{\overrightarrow} +\newcommand{\wh}{\widehat} +\newcommand{\up}{\uparrow} +\newcommand{\dw}{\downarrow} +\newcommand{\nr}{\nearrow} +\newcommand{\se}{\searrow} +\newcommand{\sw}{\swarrow} +\newcommand{\nw}{\nwarrow} +\newcommand{\mto}{,} + +\newcommand{\vm}[1]{\vspace{#1em}} +\newcommand{\vx}[1]{\vspace{#1ex}} +\newcommand{\hm}[1]{\hspace{#1em}} +\newcommand{\hx}[1]{\hspace{#1ex}} +\newcommand{\sm}{\mbox{ }} +\newcommand{\mx}{\mbox} + +%\newcommand{\nq}{\neq} +%\newcommand{\eq}{\equiv} +\newcommand{\fa}{\forall} +%\newcommand{\ex}{\exists} +\newcommand{\impl}{\rightarrow} +%\newcommand{\Or}{\vee} +%\newcommand{\And}{\wedge} +\newcommand{\ms}{\models} +\newcommand{\bw}{\bigwedge} +\newcommand{\ts}{\times} +\newcommand{\cc}{\circ} +%\newcommand{\es}{\emptyset} +%\newcommand{\bs}{\backslash} +\newcommand{\vd}{\vdash} +%\newcommand{\lan}{{\langle }} +%\newcommand{\ran}{{\rangle }} + +%\newcommand{\al}{\alpha} +\newcommand{\bt}{\beta} +%\newcommand{\io}{\iota} +\newcommand{\lb}{\lambda} +%\newcommand{\sg}{\sigma} +%\newcommand{\sa}{\Sigma} 
+%\newcommand{\om}{\Omega} +%\newcommand{\tu}{\tau} + +%%%%%%%%%%%%%%%%%%%%%%%%% +% Custom maths commands % +%%%%%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\sumbool}[2]{\{#1\}+\{#2\}} +\newcommand{\myifthenelse}[3]{\kw{if} ~ #1 ~\kw{then} ~ #2 ~ \kw{else} ~ #3} +\newcommand{\fun}[2]{\item[]{\tt {#1}}. \quad\\ #2} +\newcommand{\WF}[2]{\ensuremath{{\cal W\!F}(#1)[#2]}} +\newcommand{\WFE}[1]{\WF{E}{#1}} +\newcommand{\WT}[4]{\ensuremath{#1[#2] \vdash #3 : #4}} +\newcommand{\WTE}[3]{\WT{E}{#1}{#2}{#3}} +\newcommand{\WTEG}[2]{\WTE{\Gamma}{#1}{#2}} + +\newcommand{\WTM}[3]{\WT{#1}{}{#2}{#3}} +\newcommand{\WFT}[2]{\ensuremath{#1[] \vdash {\cal W\!F}(#2)}} +\newcommand{\WS}[3]{\ensuremath{#1[] \vdash #2 <: #3}} +\newcommand{\WSE}[2]{\WS{E}{#1}{#2}} + +\newcommand{\WTRED}[5]{\mbox{$#1[#2] \vdash #3 #4 #5$}} +\newcommand{\WTERED}[4]{\mbox{$E[#1] \vdash #2 #3 #4$}} +\newcommand{\WTELECONV}[3]{\WTERED{#1}{#2}{\leconvert}{#3}} +\newcommand{\WTEGRED}[3]{\WTERED{\Gamma}{#1}{#2}{#3}} +\newcommand{\WTECONV}[3]{\WTERED{#1}{#2}{\convert}{#3}} +\newcommand{\WTEGCONV}[2]{\WTERED{\Gamma}{#1}{\convert}{#2}} +\newcommand{\WTEGLECONV}[2]{\WTERED{\Gamma}{#1}{\leconvert}{#2}} + +\newcommand{\lab}[1]{\mathit{labels}(#1)} +\newcommand{\dom}[1]{\mathit{dom}(#1)} + +\newcommand{\CI}[2]{\mbox{$\{#1\}^{#2}$}} +\newcommand{\CIP}[3]{\mbox{$\{#1\}_{#2}^{#3}$}} +\newcommand{\CIPV}[1]{\CIP{#1}{I_1.. I_k}{P_1.. P_k}} +\newcommand{\CIPI}[1]{\CIP{#1}{I}{P}} +\newcommand{\CIF}[1]{\mbox{$\{#1\}_{f_1.. 
f_n}$}} +%BEGIN LATEX +\newcommand{\NInd}[3]{\mbox{{\sf Ind}$(#1)(\begin{array}[t]{@{}l}#2:=#3 + \,)\end{array}$}} +\newcommand{\Ind}[4]{\mbox{{\sf Ind}$(#1)[#2](\begin{array}[t]{@{}l@{}}#3:=#4 + \,)\end{array}$}} +%END LATEX +%HEVEA \newcommand{\NInd}[3]{\mbox{{\sf Ind}$(#1)(#2:=#3\,)$}} +%HEVEA \newcommand{\Ind}[4]{\mbox{{\sf Ind}$(#1)[#2](#3:=#4\,)$}} + +\newcommand{\Indp}[5]{\mbox{{\sf Ind}$_{#5}(#1)[#2](\begin{array}[t]{@{}l}#3:=#4 + \,)\end{array}$}} +\newcommand{\Def}[4]{\mbox{{\sf Def}$(#1)(#2:=#3:#4)$}} +\newcommand{\Assum}[3]{\mbox{{\sf Assum}$(#1)(#2:#3)$}} +\newcommand{\Match}[3]{\mbox{$<\!#1\!>\!{\mbox{\tt Match}}~#2~{\mbox{\tt with}}~#3~{\mbox{\tt end}}$}} +\newcommand{\Case}[3]{\mbox{$\kw{case}(#2,#1,#3)$}} +\newcommand{\match}[3]{\mbox{$\kw{match}~ #2 ~\kw{with}~ #3 ~\kw{end}$}} +\newcommand{\Fix}[2]{\mbox{\tt Fix}~#1\{#2\}} +\newcommand{\CoFix}[2]{\mbox{\tt CoFix}~#1\{#2\}} +\newcommand{\With}[2]{\mbox{\tt ~with~}} +\newcommand{\subst}[3]{#1\{#2/#3\}} +\newcommand{\substs}[4]{#1\{(#2/#3)_{#4}\}} +\newcommand{\Sort}{\mbox{$\cal S$}} +\newcommand{\convert}{=_{\beta\delta\iota\zeta}} +\newcommand{\leconvert}{\leq_{\beta\delta\iota\zeta}} +\newcommand{\NN}{\mathbb{N}} +\newcommand{\inference}[1]{$${#1}$$} + +\newcommand{\compat}[2]{\mbox{$[#1|#2]$}} +\newcommand{\tristackrel}[3]{\mathrel{\mathop{#2}\limits_{#3}^{#1}}} + +\newcommand{\Impl}{{\it Impl}} +\newcommand{\Mod}[3]{{\sf Mod}({#1}:{#2}:={#3})} +\newcommand{\ModType}[2]{{\sf ModType}({#1}:={#2})} +\newcommand{\ModS}[2]{{\sf ModS}({#1}:{#2})} +\newcommand{\ModSEq}[3]{{\sf ModSEq}({#1}:{#2}=={#3})} +\newcommand{\functor}[3]{\ensuremath{{\sf Functor}(#1:#2)\;#3}} +\newcommand{\funsig}[3]{\ensuremath{{\sf Funsig}(#1:#2)\;#3}} +\newcommand{\sig}[1]{\ensuremath{{\sf Sig}~#1~{\sf End}}} +\newcommand{\struct}[1]{\ensuremath{{\sf Struct}~#1~{\sf End}}} + + +%\newbox\tempa +%\newbox\tempb +%\newdimen\tempc +%\newcommand{\mud}[1]{\hfil $\displaystyle{\mathstrut #1}$\hfil} +%\newcommand{\rig}[1]{\hfil 
$\displaystyle{#1}$} +% \newcommand{\irulehelp}[3]{\setbox\tempa=\hbox{$\displaystyle{\mathstrut #2}$}% +% \setbox\tempb=\vbox{\halign{##\cr +% \mud{#1}\cr +% \noalign{\vskip\the\lineskip} +% \noalign{\hrule height 0pt} +% \rig{\vbox to 0pt{\vss\hbox to 0pt{${\; #3}$\hss}\vss}}\cr +% \noalign{\hrule} +% \noalign{\vskip\the\lineskip} +% \mud{\copy\tempa}\cr}} +% \tempc=\wd\tempb +% \advance\tempc by \wd\tempa +% \divide\tempc by 2 } +% \newcommand{\irule}[3]{{\irulehelp{#1}{#2}{#3} +% \hbox to \wd\tempa{\hss \box\tempb \hss}}} + +\newcommand{\sverb}[1]{{\tt #1}} +\newcommand{\mover}[2]{{#1\over #2}} +\newcommand{\jd}[2]{#1 \vdash #2} +\newcommand{\mathline}[1]{\[#1\]} +\newcommand{\zrule}[2]{#2: #1} +\newcommand{\orule}[3]{#3: {\mover{#1}{#2}}} +\newcommand{\trule}[4]{#4: \mover{#1 \qquad #2} {#3}} +\newcommand{\thrule}[5]{#5: {\mover{#1 \qquad #2 \qquad #3}{#4}}} + + + +% placement of figures + +%BEGIN LATEX +\renewcommand{\topfraction}{.99} +\renewcommand{\bottomfraction}{.99} +\renewcommand{\textfraction}{.01} +\renewcommand{\floatpagefraction}{.9} +%END LATEX + +% Macros Bruno pour description de la syntaxe + +\def\bfbar{\ensuremath{|\hskip -0.22em{}|\hskip -0.24em{}|}} +\def\TERMbar{\bfbar} +\def\TERMbarbar{\bfbar\bfbar} + + +%% Macros pour les grammaires +\def\GR#1{\text{\large(}#1\text{\large)}} +\def\NT#1{\langle\textit{#1}\rangle} +\def\NTL#1#2{\langle\textit{#1}\rangle_{#2}} +\def\TERM#1{{\bf\textrm{\bf #1}}} +%\def\TERM#1{{\bf\textsf{#1}}} +\def\KWD#1{\TERM{#1}} +\def\ETERM#1{\TERM{#1}} +\def\CHAR#1{\TERM{#1}} + +\def\STAR#1{#1*} +\def\STARGR#1{\GR{#1}*} +\def\PLUS#1{#1+} +\def\PLUSGR#1{\GR{#1}+} +\def\OPT#1{#1?} +\def\OPTGR#1{\GR{#1}?} +%% Tableaux de definition de non-terminaux +\newenvironment{cadre} + {\begin{array}{|c|}\hline\\} + {\\\\\hline\end{array}} +\newenvironment{rulebox} + {$$\begin{cadre}\begin{array}{r@{~}c@{~}l@{}l@{}r}} + {\end{array}\end{cadre}$$} +\def\DEFNT#1{\NT{#1} & ::= &} +\def\EXTNT#1{\NT{#1} & ::= & ... 
\\&|&} +\def\RNAME#1{(\textsc{#1})} +\def\SEPDEF{\\\\} +\def\nlsep{\\&|&} +\def\nlcont{\\&&} +\newenvironment{rules} + {\begin{center}\begin{rulebox}} + {\end{rulebox}\end{center}} + +% $Id: macros.tex 8606 2006-02-23 13:58:10Z herbelin $ + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/common/title.tex b/doc/common/title.tex new file mode 100755 index 00000000..2ed49ede --- /dev/null +++ b/doc/common/title.tex @@ -0,0 +1,86 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% File title.tex +% Page formatting commands +% Macro \coverpage +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +%\setlength{\marginparwidth}{0pt} +%\setlength{\oddsidemargin}{0pt} +%\setlength{\evensidemargin}{0pt} +%\setlength{\marginparsep}{0pt} +%\setlength{\topmargin}{0pt} +%\setlength{\textwidth}{16.9cm} +%\setlength{\textheight}{22cm} +\usepackage{fullpage} + +\newcommand{\printingdate}{\today} +\newcommand{\isdraft}{\Large\bf\today\\[20pt]} +%\newcommand{\isdraft}{\vspace{20pt}} + +%To show the top for the toc in html +\newcommand{\tophtml}{} + +\newcommand{\coverpage}[3]{ +\thispagestyle{empty} +\begin{center} +\begin{Huge} +\begin{bf} +The Coq Proof Assistant\\ +\vspace{12pt} + #1\\ +\end{bf} +\end{Huge} +\vspace{20pt} +\isdraft +{\Large \bf Version \coqversion} +\footnote[1]{This research was partly supported by IST working group ``Types''} +\\ +\vspace{120pt} +{\bf #2}\\ +\vfill +{\Large \bf LogiCal Project}\\ +\vspace{15pt} +\end{center} +%BEGIN LATEX +\newpage +\vspace*{500pt} +\thispagestyle{empty} +%END LATEX +\begin{flushleft} +%BEGIN LATEX +{\large{V\coqversion, +\printingdate}}\\[20pt] +%END LATEX +{\large{\copyright INRIA 1999-2004 ({\Coq} versions 7.x)}}\\ +{\large{\copyright INRIA 2004-2006 ({\Coq} versions 8.x)}}\\ +{\large{#3}} +\end{flushleft} +%BEGIN LATEX +\newpage +%END LATEX +} + + +\newcommand{\shorttitle}[1]{ +\begin{center} +\begin{huge} +\begin{bf} +The Coq Proof Assistant\\ +\vspace{10pt} + #1\\ +\end{bf} +\end{huge} 
+\end{center} +\vspace{5pt} +} + +% Local Variables: +% mode: LaTeX +% TeX-master: "" +% End: + +% $Id: title.tex 8607 2006-02-23 14:21:14Z herbelin $ + + + + diff --git a/doc/faq/FAQ.tex b/doc/faq/FAQ.tex new file mode 100644 index 00000000..2b5d898f --- /dev/null +++ b/doc/faq/FAQ.tex @@ -0,0 +1,2481 @@ +\RequirePackage{ifpdf} +\ifpdf % si on est en pdflatex +\documentclass[a4paper,pdftex]{article} +\else +\documentclass[a4paper]{article} +\fi +\pagestyle{plain} + +% yay les symboles +\usepackage{stmaryrd} +\usepackage{amssymb} +\usepackage{url} +%\usepackage{multicol} +\usepackage{hevea} +\usepackage{fullpage} +\usepackage[latin1]{inputenc} +\usepackage[english]{babel} + +\ifpdf % si on est en pdflatex + \usepackage[pdftex]{graphicx} +\else + \usepackage[dvips]{graphicx} +\fi + +%\input{../macros.tex} + +% Making hevea happy +%HEVEA \renewcommand{\textbar}{|} +%HEVEA \renewcommand{\textunderscore}{\_} + +\def\Question#1{\stepcounter{question}\subsubsection{#1}} + +% version et date +\def\faqversion{0.1} + +% les macros d'amour +\def\Coq{\textsc{Coq}} +\def\Why{\textsc{Why}} +\def\Caduceus{\textsc{Caduceus}} +\def\Krakatoa{\textsc{Krakatoa}} +\def\Ltac{\textsc{Ltac}} +\def\CoqIde{\textsc{CoqIde}} + +\newcommand{\coqtt}[1]{{\tt #1}} +\newcommand{\coqimp}{{\mbox{\tt ->}}} +\newcommand{\coqequiv}{{\mbox{\tt <->}}} + + +% macro pour les tactics +\def\split{{\tt split}} +\def\assumption{{\tt assumption}} +\def\auto{{\tt auto}} +\def\trivial{{\tt trivial}} +\def\tauto{{\tt tauto}} +\def\left{{\tt left}} +\def\right{{\tt right}} +\def\decompose{{\tt decompose}} +\def\intro{{\tt intro}} +\def\intros{{\tt intros}} +\def\field{{\tt field}} +\def\ring{{\tt ring}} +\def\apply{{\tt apply}} +\def\exact{{\tt exact}} +\def\cut{{\tt cut}} +\def\assert{{\tt assert}} +\def\solve{{\tt solve}} +\def\idtac{{\tt idtac}} +\def\fail{{\tt fail}} +\def\existstac{{\tt exists}} +\def\firstorder{{\tt firstorder}} +\def\congruence{{\tt congruence}} +\def\gb{{\tt gb}} +\def\generalize{{\tt 
generalize}} +\def\abstracttac{{\tt abstract}} +\def\eapply{{\tt eapply}} +\def\unfold{{\tt unfold}} +\def\rewrite{{\tt rewrite}} +\def\replace{{\tt replace}} +\def\simpl{{\tt simpl}} +\def\elim{{\tt elim}} +\def\set{{\tt set}} +\def\pose{{\tt pose}} +\def\case{{\tt case}} +\def\destruct{{\tt destruct}} +\def\reflexivity{{\tt reflexivity}} +\def\transitivity{{\tt transitivity}} +\def\symmetry{{\tt symmetry}} +\def\Focus{{\tt Focus}} +\def\discriminate{{\tt discriminate}} +\def\contradiction{{\tt contradiction}} +\def\intuition{{\tt intuition}} +\def\try{{\tt try}} +\def\repeat{{\tt repeat}} +\def\eauto{{\tt eauto}} +\def\subst{{\tt subst}} +\def\symmetryin{{\tt symmetryin}} +\def\instantiate{{\tt instantiate}} +\def\inversion{{\tt inversion}} +\def\Defined{{\tt Defined}} +\def\Qed{{\tt Qed}} +\def\pattern{{\tt pattern}} +\def\Type{{\tt Type}} +\def\Prop{{\tt Prop}} +\def\Set{{\tt Set}} + + +\newcommand\vfile[2]{\ahref{#1}{\tt {#2}.v}} +\urldef{\InitWf}{\url} + {http://coq.inria.fr/library/Coq.Init.Wf.html} +\urldef{\LogicBerardi}{\url} + {http://coq.inria.fr/library/Coq.Logic.Berardi.html} +\urldef{\LogicClassical}{\url} + {http://coq.inria.fr/library/Coq.Logic.Classical.html} +\urldef{\LogicClassicalFacts}{\url} + {http://coq.inria.fr/library/Coq.Logic.ClassicalFacts.html} +\urldef{\LogicClassicalDescription}{\url} + {http://coq.inria.fr/library/Coq.Logic.ClassicalDescription.html} +\urldef{\LogicProofIrrelevance}{\url} + {http://coq.inria.fr/library/Coq.Logic.ProofIrrelevance.html} +\urldef{\LogicEqdep}{\url} + {http://coq.inria.fr/library/Coq.Logic.Eqdep.html} +\urldef{\LogicEqdepDec}{\url} + {http://coq.inria.fr/library/Coq.Logic.Eqdep_dec.html} + + + + +\begin{document} +\bibliographystyle{plain} +\newcounter{question} +\renewcommand{\thesubsubsection}{\arabic{question}} + +%%%%%%% Coq pour les nuls %%%%%%% + +\title{Coq Version 8.0 for the Clueless\\ + \large(\protect\ref{lastquestion} + \ Hints) +} +\author{Pierre Castéran \and Hugo Herbelin \and Florent 
Kirchner \and Benjamin Monate \and Julien Narboux} +\maketitle + +%%%%%%% + +\begin{abstract} +This note intends to provide an easy way to get acquainted with the +{\Coq} theorem prover. It tries to formulate appropriate answers +to some of the questions any newcomers will face, and to give +pointers to other references when possible. +\end{abstract} + +%%%%%%% + +%\begin{multicols}{2} +\tableofcontents +%\end{multicols} + +%%%%%%% + +\newpage + +\section{Introduction} +This FAQ is the sum of the questions that came to mind as we developed +proofs in \Coq. Since we are singularly short-minded, we wrote the +answers we found on bits of papers to have them at hand whenever the +situation occurs again. This is pretty much the result of that: a +collection of tips one can refer to when proofs become intricate. Yes, +this means we won't take the blame for the shortcomings of this +FAQ. But if you want to contribute and send in your own question and +answers, feel free to write to us\ldots + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Presentation} + +\Question{What is {\Coq}?}\label{whatiscoq} +The {\Coq} tool is a formal proof management system: a proof done with {\Coq} is mechanically checked by the machine. +In particular, {\Coq} allows: +\begin{itemize} + \item the definition of mathematical objects and programming objects, + \item to state mathematical theorems and software specifications, + \item to interactively develop formal proofs of these theorems, + \item to check these proofs by a small certification ``kernel''. +\end{itemize} +{\Coq} is based on a logical framework called ``Calculus of Inductive +Constructions'' extended by a modular development system for theories. + +\Question{Did you really need to name it like that?} +Some French computer scientists have a tradition of naming their +software as animal species: Caml, Elan, Foc or Phox are examples +of this tacit convention. 
In French, ``coq'' means rooster, and it +sounds like the initials of the Calculus of Constructions CoC on which +it is based. + +\Question{Is {\Coq} a theorem prover?} + +{\Coq} comes with decision and semi-decision procedures ( +propositional calculus, Presburger's arithmetic, ring and field +simplification, resolution, ...) but the main style for proving +theorems is interactively by using LCF-style tactics. + + +\Question{What are the other theorem provers?} +Many other theorem provers are available for use nowadays. +Isabelle, HOL, HOL Light, Lego, Nuprl, PVS are examples of provers that are fairly similar +to {\Coq} by the way they interact with the user. Other relatives of +{\Coq} are ACL2, Agda/Alfa, Twelf, Kiv, Mizar, NqThm, +\begin{htmlonly}% +Omega\ldots +\end{htmlonly} +\begin{latexonly}% +{$\Omega$}mega\ldots +\end{latexonly} + +\Question{What do I have to trust when I see a proof checked by Coq?} + +You have to trust: + +\begin{description} +\item[The theory behind Coq] The theory of {\Coq} version 8.0 is +generally admitted to be consistent wrt Zermelo-Fraenkel set theory + +inaccessible cardinals. Proofs of consistency of subsystems of the +theory of Coq can be found in the literature. +\item[The Coq kernel implementation] You have to trust that the +implementation of the {\Coq} kernel mirrors the theory behind {\Coq}. The +kernel is intentionally small to limit the risk of conceptual or +accidental implementation bugs. +\item[The Objective Caml compiler] The {\Coq} kernel is written using the +Objective Caml language but it uses only the most standard features +(no object, no label ...), so that it is highly unprobable that an +Objective Caml bug breaks the consistency of {\Coq} without breaking all +other kinds of features of {\Coq} or of other software compiled with +Objective Caml. +\item[Your hardware] In theory, if your hardware does not work +properly, it can accidentally be the case that False becomes +provable. 
But it is more likely the case that the whole {\Coq} system +will be unusable. You can check your proof using different computers +if you feel the need to. +\item[Your axioms] Your axioms must be consistent with the theory +behind {\Coq}. +\end{description} + + +\Question{Where can I find information about the theory behind {\Coq}?} +\begin{description} +\item[The Calculus of Inductive Constructions] The +\ahref{http://coq.inria.fr/doc/Reference-Manual006.html}{corresponding} +chapter and the chapter on +\ahref{http://coq.inria.fr/doc/Reference-Manual007.html}{modules} in +the {\Coq} Reference Manual. +\item[Type theory] A book~\cite{ProofsTypes} or some lecture +notes~\cite{Types:Dowek}. +\item[Inductive types] +Christine Paulin-Mohring's habilitation thesis~\cite{Pau96b}. +\item[Co-Inductive types] +Eduardo Giménez' thesis~\cite{EGThese}. +\item[Miscellaneous] A +\ahref{http://coq.inria.fr/doc/biblio.html}{bibliography} about Coq +\end{description} + + +\Question{How can I use {\Coq} to prove programs?} + +You can either extract a program from a proof by using the extraction +mechanism or use dedicated tools, such as +\ahref{http://why.lri.fr}{\Why}, +\ahref{http://krakatoa.lri.fr}{\Krakatoa}, +\ahref{http://why.lri.fr/caduceus/index.en.html}{\Caduceus}, to prove +annotated programs written in other languages. + +%\Question{How many {\Coq} users are there?} +% +%An estimation is about 100 regular users. + +\Question{How old is {\Coq}?} + +The first implementation is from 1985 (it was named {\sf CoC} which is +the acronym of the name of the logic it implemented: the Calculus of +Constructions). The first official release of {\Coq} (version 4.10) +was distributed in 1989. + +\Question{What are the \Coq-related tools?} + +There are graphical user interfaces: +\begin{description} +\item[Coqide] A GTK based GUI for \Coq. +\item[Pcoq] A GUI for {\Coq} with proof by pointing and pretty printing. +\item[coqwc] A tool similar to {\tt wc} to count lines in {\Coq} files. 
+\item[Proof General] A emacs mode for {\Coq} and many other proof assistants. +\end{description} + +There are documentation and browsing tools: + +\begin{description} +\item[Helm/Mowgli] A rendering, searching and publishing tool. +\item[coq-tex] A tool to insert {\Coq} examples within .tex files. +\item[coqdoc] A documentation tool for \Coq. +\end{description} + +There are front-ends for specific languages: + +\begin{description} +\item[Why] A back-end generator of verification conditions. +\item[Krakatoa] A Java code certification tool that uses both {\Coq} and {\Why} to verify the soundness of implementations with regards to the specifications. +\item[Caduceus] A C code certification tool that uses both {\Coq} and \Why. +\item[Zenon] A first-order theorem prover. +\item[Focal] The \ahref{http://focal.inria.fr}{Focal} project aims at building an environment to develop certified computer algebra libraries. +\end{description} + +\Question{What are the high-level tactics of \Coq} + +\begin{itemize} +\item Decision of quantifier-free Presburger's Arithmetic +\item Simplification of expressions on rings and fields +\item Decision of closed systems of equations +\item Semi-decision of first-order logic +\item Prolog-style proof search, possibly involving equalities +\end{itemize} + +\Question{What are the main libraries available for \Coq} + +\begin{itemize} +\item Basic Peano's arithmetic, binary integer numbers, rational numbers, +\item Real analysis, +\item Libraries for lists, boolean, maps, floating-point numbers, +\item Libraries for relations, sets and constructive algebra, +\item Geometry +\end{itemize} + + +\Question{What are the mathematical applications for {\Coq}?} + +{\Coq} is used for formalizing mathematical theories, for teaching, +and for proving properties of algorithms or programs libraries. 
+ +The largest mathematical formalization has been done at the University +of Nijmegen (see the +\ahref{http://vacuumcleaner.cs.kun.nl/c-corn}{Constructive Coq +Repository at Nijmegen}). + +A symbolic step has also been obtained by formalizing in full a proof +of the Four Color Theorem. + +\Question{What are the industrial applications for {\Coq}?} + +{\Coq} is used e.g. to prove properties of the JavaCard system +(especially by Schlumberger and Trusted Logic). It has +also been used to formalize the semantics of the Lucid-Synchrone +data-flow synchronous calculus used by Esterel-Technologies. + +\iffalse +todo christine compilo lustre? +\fi + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Documentation} + +\Question{Where can I find documentation about {\Coq}?} +All the documentation about \Coq, from the reference manual~\cite{Coq:manual} to +friendly tutorials~\cite{Coq:Tutorial} and documentation of the standard library, is available +\ahref{http://coq.inria.fr/doc-eng.html}{online}. +All these documents are viewable either in browsable HTML, or as +downloadable postscripts. + +\Question{Where can I find this FAQ on the web?} + +This FAQ is available online at \ahref{http://coq.inria.fr/doc/faq.html}{\url{http://coq.inria.fr/doc/faq.html}}. + +\Question{How can I submit suggestions / improvements / additions for this FAQ?} + +This FAQ is unfinished (in the sense that there are some obvious +sections that are missing). Please send contributions to \texttt{Florent.Kirchner at lix.polytechnique.fr} and \texttt{Julien.Narboux at inria.fr}. + +\Question{Is there any mailing list about {\Coq}?} +The main {\Coq} mailing list is \url{coq-club@pauillac.inria.fr}, which +broadcasts questions and suggestions about the implementation, the +logical formalism or proof developments. See +\ahref{http://coq.inria.fr/mailman/listinfo/coq-club}{\url{http://pauillac.inria.fr/mailman/listinfo/coq-club}} for +subscription. 
For bugs reports see question \ref{coqbug}. + +\Question{Where can I find an archive of the list?} +The archives of the {\Coq} mailing list are available at +\ahref{http://pauillac.inria.fr/pipermail/coq-club}{\url{http://coq.inria.fr/pipermail/coq-club}}. + + +\Question{How can I be kept informed of new releases of {\Coq}?} + +New versions of {\Coq} are announced on the coq-club mailing list. If you only want to receive information about new releases, you can subscribe to {\Coq} on \ahref{http://freshmeat.net/projects/coq/}{\url{http://freshmeat.net/projects/coq/}}. + + +\Question{Is there any book about {\Coq}?} + +The first book on \Coq, Yves Bertot and Pierre Castéran's Coq'Art has been published by Springer-Verlag in 2004: +\begin{quote} +``This book provides a pragmatic introduction to the development of +proofs and certified programs using \Coq. With its large collection of +examples and exercises it is an invaluable tool for researchers, +students, and engineers interested in formal methods and the +development of zero-default software.'' +\end{quote} + +\Question{Where can I find some {\Coq} examples?} + +There are examples in the manual~\cite{Coq:manual} and in the +Coq'Art~\cite{Coq:coqart} exercises \ahref{\url{http://www.labri.fr/Perso/~casteran/CoqArt/index.html}}{\url{http://www.labri.fr/Perso/~casteran/CoqArt/index.html}}. +You can also find large developments using +{\Coq} in the {\Coq} user contributions: +\ahref{http://coq.inria.fr/contrib-eng.html}{\url{http://coq.inria.fr/contrib-eng.html}}. + +\Question{How can I report a bug?}\label{coqbug} + +You can use the web interface accessible at \ahref{http://coq.inria.fr}{\url{http://coq.inria.fr}}, link ``contacts''. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{Installation} + +\Question{What is the license of {\Coq}?} +{\Coq} is distributed under the GNU Lesser General License +(LGPL). 
+ +\Question{Where can I find the sources of {\Coq}?} +The sources of {\Coq} can be found online in the tar.gz'ed packages +(\ahref{http://coq.inria.fr}{\url{http://coq.inria.fr}}, link +``download''). Development sources can be accessed at +\ahref{http://coq.gforge.inria.fr/}{\url{http://coq.gforge.inria.fr/}} + +\Question{On which platform is {\Coq} available?} +Compiled binaries are available for Linux, MacOS X, and Windows. The +sources can be easily compiled on all platforms supporting Objective +Caml. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\section{The logic of {\Coq}} + +\subsection{General} + +\Question{What is the logic of \Coq?} + +{\Coq} is based on an axiom-free type theory called +the Calculus of Inductive Constructions (see Coquand \cite{CoHu86}, +Luo~\cite{Luo90} +and Coquand--Paulin-Mohring \cite{CoPa89}). It includes higher-order +functions and predicates, inductive and co-inductive datatypes and +predicates, and a stratified hierarchy of sets. + +\Question{Is \Coq's logic intuitionistic or classical?} + +{\Coq}'s logic is modular. The core logic is intuitionistic +(i.e. excluded-middle $A\vee\neg A$ is not granted by default). It can +be extended to classical logic on demand by requiring an +optional module stating $A\vee\neg A$. + +\Question{Can I define non-terminating programs in \Coq?} + +All programs in {\Coq} are terminating. Especially, loops +must come with an evidence of their termination. + +Non-terminating programs can be simulated by passing around a +bound on how long the program is allowed to run before dying. + +\Question{How is equational reasoning working in {\Coq}?} + + {\Coq} comes with an internal notion of computation called +{\em conversion} (e.g. $(x+1)+y$ is internally equivalent to +$(x+y)+1$; similarly applying argument $a$ to a function mapping $x$ +to some expression $t$ converts to the expression $t$ where $x$ is +replaced by $a$). 
This notion of conversion (which is decidable +because {\Coq} programs are terminating) covers a certain part of +equational reasoning but is limited to sequential evaluation of +expressions of (not necessarily closed) programs. Besides conversion, +equations have to be treated by hand or using specialised tactics. + +\subsection{Axioms} + +\Question{What axioms can be safely added to {\Coq}?} + +There are a few typical useful axioms that are independent from the +Calculus of Inductive Constructions and that can be safely added to +{\Coq}. These axioms are stated in the directory {\tt Logic} of the +standard library of {\Coq}. The most interesting ones are + +\begin{itemize} +\item Excluded-middle: $\forall A:Prop, A \vee \neg A$ +\item Proof-irrelevance: $\forall A:Prop \forall p_1 p_2:A, p_1=p_2$ +\item Unicity of equality proofs (or equivalently Streicher's axiom $K$): +$\forall A \forall x y:A \forall p_1 p_2:x=y, p_1=p_2$ +\item The axiom of unique choice: $\forall x \exists! y R(x,y) \rightarrow \exists f \forall x R(x,f(x))$ +\item The functional axiom of choice: $\forall x \exists y R(x,y) \rightarrow \exists f \forall x R(x,f(x))$ +\item Extensionality of predicates: $\forall P Q:A\rightarrow Prop, (\forall x, P(x) \leftrightarrow Q(x)) \rightarrow P=Q$ +\item Extensionality of functions: $\forall f g:A\rightarrow B, (\forall x, f(x)=g(x)) \rightarrow f=g$ +\end{itemize} + +Here is a summary of the relative strength of these axioms, most +proofs can be found in directory {\tt Logic} of the standard library. +The justification of their validity relies on the interpretability in +set theory. + +%HEVEA\imgsrc{axioms.png} +%BEGIN LATEX +\ifpdf % si on est en pdflatex +\includegraphics[width=1.0\textwidth]{axioms.png} +\else +\includegraphics[width=1.0\textwidth]{axioms.eps} +\fi +%END LATEX + +\Question{What standard axioms are inconsistent with {\Coq}?} + +The axiom of unique choice together with classical logic +(e.g. 
excluded-middle) are inconsistent in the variant of the Calculus +of Inductive Constructions where {\Set} is impredicative. + +As a consequence, the functional form of the axiom of choice and +excluded-middle, or any form of the axiom of choice together with +predicate extensionality are inconsistent in the {\Set}-impredicative +version of the Calculus of Inductive Constructions. + +The main purpose of the \Set-predicative restriction of the Calculus +of Inductive Constructions is precisely to accommodate these axioms +which are quite standard in mathematical usage. + +The $\Set$-predicative system is commonly considered consistent by +interpreting it in a standard set-theoretic boolean model, even with +classical logic, axiom of choice and predicate extensionality added. + +\Question{What is Streicher's axiom $K$} +\label{Streicher} + +Streicher's axiom $K$~\cite{HofStr98} is an axiom that asserts +dependent elimination of reflexive equality proofs. + +\begin{coq_example*} +Axiom Streicher_K : + forall (A:Type) (x:A) (P: x=x -> Prop), + P (refl_equal x) -> forall p: x=x, P p. +\end{coq_example*} + +In the general case, axiom $K$ is an independent statement of the +Calculus of Inductive Constructions. However, it is true on decidable +domains (see file \vfile{\LogicEqdepDec}{Eqdep\_dec}). It is also +trivially a consequence of proof-irrelevance (see +\ref{proof-irrelevance}) hence of classical logic. + +Axiom $K$ is equivalent to {\em Uniqueness of Identity Proofs} \cite{HofStr98} + +\begin{coq_example*} +Axiom UIP : forall (A:Set) (x y:A) (p1 p2: x=y), p1 = p2. +\end{coq_example*} + +Axiom $K$ is also equivalent to {\em Uniqueness of Reflexive Identity Proofs} \cite{HofStr98} + +\begin{coq_example*} +Axiom UIP_refl : forall (A:Set) (x:A) (p: x=x), p = refl_equal x. +\end{coq_example*} + +Axiom $K$ is also equivalent to + +\begin{coq_example*} +Axiom + eq_rec_eq : + forall (A:Set) (x:A) (P: A->Set) (p:P x) (h: x=x), + p = eq_rect x P p x h. 
+\end{coq_example*} + +It is also equivalent to the injectivity of dependent equality (dependent equality is itself equivalent to equality of dependent pairs). + +\begin{coq_example*} +Inductive eq_dep (U:Set) (P:U -> Set) (p:U) (x:P p) : +forall q:U, P q -> Prop := + eq_dep_intro : eq_dep U P p x p x. +Axiom + eq_dep_eq : + forall (U:Set) (u:U) (P:U -> Set) (p1 p2:P u), + eq_dep U P u p1 u p2 -> p1 = p2. +\end{coq_example*} + +\Question{What is proof-irrelevance} +\label{proof-irrelevance} + +A specificity of the Calculus of Inductive Constructions is to permit +statements about proofs. This leads to the question of comparing two +proofs of the same proposition. Identifying all proofs of the same +proposition is called {\em proof-irrelevance}: +$$ +\forall A:\Prop, \forall p q:A, p=q +$$ + +Proof-irrelevance (in {\Prop}) can be assumed without contradiction in +{\Coq}. It expresses that only provability matters, whatever the exact +form of the proof is. This is in harmony with the common purely +logical interpretation of {\Prop}. Contrastingly, proof-irrelevance is +inconsistent in {\Set} since there are types in {\Set}, such as the +type of booleans, that are provably more than 2 elements. + +Proof-irrelevance (in {\Prop}) is a consequence of classical logic +(see proofs in file \vfile{\LogicClassical}{Classical} and +\vfile{\LogicBerardi}{Berardi}). Proof-irrelevance is also a +consequence of propositional extensionality (i.e. \coqtt{(A {\coqequiv} B) +{\coqimp} A=B}, see the proof in file +\vfile{\LogicClassicalFacts}{ClassicalFacts}). + +Proof-irrelevance directly implies Streicher's axiom $K$. + +\Question{What about functional extensionality?} + +Extensionality of functions is admittedly consistent with the +Set-predicative Calculus of Inductive Constructions. + +%\begin{coq_example*} +% Axiom extensionality : (A,B:Set)(f,g:(A->B))(x:A)(f x)=(g x)->f=g. +%\end{coq_example*} + +Let {\tt A}, {\tt B} be types. 
To deal with extensionality on +\verb=A->B= without relying on a general extensionality axiom, +a possible approach is to define one's own extensional equality on +\verb=A->B=. + +\begin{coq_eval} +Variables A B : Set. +\end{coq_eval} + +\begin{coq_example*} +Definition ext_eq (f g: A->B) := forall x:A, f x = g x. +\end{coq_example*} + +and to reason on \verb=A->B= as a setoid (see the Chapter on +Setoids in the Reference Manual). + +\Question{Is {\Prop} impredicative?} + +Yes, the sort {\Prop} of propositions is {\em +impredicative}. Otherwise said, a statement of the form $\forall +A:Prop, P(A)$ can be instantiated by itself: if $\forall A:\Prop, P(A)$ +is provable, then $P(\forall A:\Prop, P(A))$ is. + +\Question{Is {\Set} impredicative?} + +No, the sort {\Set} lying at the bottom of the hierarchy of +computational types is {\em predicative} in the basic {\Coq} system. +This means that a family of types in {\Set}, e.g. $\forall A:\Set, A +\rightarrow A$, is not a type in {\Set} and it cannot be applied on +itself. + +However, the sort {\Set} was impredicative in the original versions of +{\Coq}. For backward compatibility, or for experiments by +knowledgeable users, the logic of {\Coq} can be set impredicative for +{\Set} by calling {\Coq} with the option {\tt -impredicative-set}. + +{\Set} has been made predicative from version 8.0 of {\Coq}. The main +reason is to interact smoothly with a classical mathematical world +where both excluded-middle and the axiom of description are valid (see +file \vfile{\LogicClassicalDescription}{ClassicalDescription} for a +proof that excluded-middle and description implies the double negation +of excluded-middle in {\Set} and file {\tt Hurkens\_Set.v} from the +user contribution {\tt Rocq/PARADOXES} for a proof that +impredicativity of {\Set} implies the simple negation of +excluded-middle in {\Set}). + +\Question{Is {\Type} impredicative?} + +No, {\Type} is stratified. 
This is hidden for the +user, but {\Coq} internally maintains a set of constraints ensuring +stratification. + +If {\Type} were impredicative then it would be possible to encode +Girard's systems $U-$ and $U$ in {\Coq} and it is known from Girard, +Coquand, Hurkens and Miquel that systems $U-$ and $U$ are inconsistent +[Girard 1972, Coquand 1991, Hurkens 1993, Miquel 2001]. This encoding +can be found in file {\tt Logic/Hurkens.v} of {\Coq} standard library. + +For instance, when the user see {\tt $\forall$ X:Type, X->X : Type}, each +occurrence of {\Type} is implicitly bound to a different level, say +$\alpha$ and $\beta$ and the actual statement is {\tt +forall X:Type($\alpha$), X->X : Type($\beta$)} with the constraint +$\alpha<\beta$. + +When a statement violates a constraint, the message {\tt Universe +inconsistency} appears. Example: {\tt fun (x:Type) (y:$\forall$ X:Type, X +{\coqimp} X) => y x x}. + +\Question{I have two proofs of the same proposition. Can I prove they are equal?} + +In the base {\Coq} system, the answer is generally no. However, if +classical logic is set, the answer is yes for propositions in {\Prop}. +The answer is also yes if proof irrelevance holds (see question +\ref{proof-irrelevance}). + +There are also ``simple enough'' propositions for which you can prove +the equality without requiring any extra axioms. This is typically +the case for propositions defined deterministically as a first-order +inductive predicate on decidable sets. See for instance in question +\ref{le-uniqueness} an axiom-free proof of the unicity of the proofs of +the proposition {\tt le m n} (less or equal on {\tt nat}). + +% It is an ongoing work of research to natively include proof +% irrelevance in {\Coq}. + +\Question{I have two proofs of an equality statement. Can I prove they are +equal?} + + Yes, if equality is decidable on the domain considered (which +is the case for {\tt nat}, {\tt bool}, etc): see {\Coq} file +\verb=Eqdep_dec.v=). 
No otherwise, unless +assuming Streicher's axiom $K$ (see \cite{HofStr98}) or a more general +assumption such as proof-irrelevance (see \ref{proof-irrelevance}) or +classical logic. + +All of these statements can be found in file \vfile{\LogicEqdep}{Eqdep}. + +\Question{Can I prove that the second components of equal dependent +pairs are equal?} + + The answer is the same as for proofs of equality +statements. It is provable if equality on the domain of the first +component is decidable (look at \verb=inj_right_pair= from file +\vfile{\LogicEqdepDec}{Eqdep\_dec}), but not provable in the general +case. However, it is consistent (with the Calculus of Constructions) +to assume it is true. The file \vfile{\LogicEqdep}{Eqdep} actually +provides an axiom (equivalent to Streicher's axiom $K$) which entails +the result (look at \verb=inj_pair2= in \vfile{\LogicEqdep}{Eqdep}). + +\subsection{Impredicativity} + +\Question{Why {\tt injection} does not work on impredicative {\tt Set}?} + + E.g. in this case (this occurs only in the {\tt Set}-impredicative + variant of \Coq): + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{coq_example*} +Inductive I : Type := + intro : forall k:Set, k -> I. +Lemma eq_jdef : + forall x y:nat, intro _ x = intro _ y -> x = y. +Proof. + intros x y H; injection H. +\end{coq_example*} + + Injectivity of constructors is restricted to predicative types. If +injectivity on large inductive types were not restricted, we would be +allowed to derive an inconsistency (e.g. following the lines of +Burali-Forti paradox). The question remains open whether injectivity +is consistent on some large inductive types not expressive enough to +encode known paradoxes (such as type I above). + + +\Question{What is a ``large inductive definition''?} + +An inductive definition in {\Prop} or {\Set} is called large +if its constructors embed sets or propositions. 
As an example, here is +a large inductive type: + +\begin{coq_example*} +Inductive sigST (P:Set -> Set) : Type := + existST : forall X:Set, P X -> sigST P. +\end{coq_example*} + +In the {\tt Set} impredicative variant of {\Coq}, large inductive +definitions in {\tt Set} have restricted elimination schemes to +prevent inconsistencies. Especially, projecting the set or the +proposition content of a large inductive definition is forbidden. If +it were allowed, it would be possible to encode e.g. Burali-Forti +paradox \cite{Gir70,Coq85}. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Talkin' with the Rooster} + + +%%%%%%% +\subsection{My goal is ..., how can I prove it?} + + +\Question{My goal is a conjunction, how can I prove it?} + +Use some theorem or assumption or use the {\split} tactic. +\begin{coq_example} +Goal forall A B:Prop, A->B-> A/\B. +intros. +split. +assumption. +assumption. +Qed. +\end{coq_example} + +\Question{My goal contains a conjunction as an hypothesis, how can I use it?} + +If you want to decompose your hypothesis into other hypothesis you can use the {\decompose} tactic: + +\begin{coq_example} +Goal forall A B:Prop, A/\B-> B. +intros. +decompose [and] H. +assumption. +Qed. +\end{coq_example} + + +\Question{My goal is a disjunction, how can I prove it?} + +You can prove the left part or the right part of the disjunction using +{\left} or {\right} tactics. If you want to do a classical +reasoning step, use the {\tt classic} axiom to prove the right part with the assumption +that the left part of the disjunction is false. + +\begin{coq_example} +Goal forall A B:Prop, A-> A\/B. +intros. +left. +assumption. +Qed. +\end{coq_example} + +An example using classical reasoning: + +\begin{coq_example} +Require Import Classical. + +Ltac classical_right := +match goal with +| _:_ |-?X1 \/ _ => (elim (classic X1);intro;[left;trivial|right]) +end. 
+ +Ltac classical_left := +match goal with +| _:_ |- _ \/?X1 => (elim (classic X1);intro;[right;trivial|left]) +end. + + +Goal forall A B:Prop, (~A -> B) -> A\/B. +intros. +classical_right. +auto. +Qed. +\end{coq_example} + +\Question{My goal is an universally quantified statement, how can I prove it?} + +Use some theorem or assumption or introduce the quantified variable in +the context using the {\intro} tactic. If there are several +variables you can use the {\intros} tactic. A good habit is to +provide names for these variables: {\Coq} will do it anyway, but such +automatic naming decreases legibility and robustness. + + +\Question{My goal is an existential, how can I prove it?} + +Use some theorem or assumption or exhibit the witness using the {\existstac} tactic. +\begin{coq_example} +Goal exists x:nat, forall y, x+y=y. +exists 0. +intros. +auto. +Qed. +\end{coq_example} + + +\Question{My goal is solvable by some lemma, how can I prove it?} + +Just use the {\apply} tactic. + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{coq_example} +Lemma mylemma : forall x, x+0 = x. +auto. +Qed. + +Goal 3+0 = 3. +apply mylemma. +Qed. +\end{coq_example} + + + +\Question{My goal contains False as an hypothesis, how can I prove it?} + +You can use the {\contradiction} or {\intuition} tactics. + + +\Question{My goal is an equality of two convertible terms, how can I prove it?} + +Just use the {\reflexivity} tactic. + +\begin{coq_example} +Goal forall x, 0+x = x. +intros. +reflexivity. +Qed. +\end{coq_example} + +\Question{My goal is a {\tt let x := a in ...}, how can I prove it?} + +Just use the {\intro} tactic. + + +\Question{My goal is a {\tt let (a, ..., b) := c in}, how can I prove it?} + +Just use the {\destruct} c as (a,...,b) tactic. + + +\Question{My goal contains some existential hypotheses, how can I use it?} + +You can use the tactic {\elim} with you hypotheses as an argument. 
+ +\Question{My goal contains some existential hypotheses, how can I use it and decompose my knowledge about this new thing into different hypotheses?} + +\begin{verbatim} +Ltac DecompEx H P := elim H;intro P;intro TO;decompose [and] TO;clear TO;clear H. +\end{verbatim} + + +\Question{My goal is an equality, how can I swap the left and right hand terms?} + +Just use the {\symmetry} tactic. +\begin{coq_example} +Goal forall x y : nat, x=y -> y=x. +intros. +symmetry. +assumption. +Qed. +\end{coq_example} + +\Question{My hypothesis is an equality, how can I swap the left and right hand terms?} + +Just use the {\symmetryin} tactic. + +\begin{coq_example} +Goal forall x y : nat, x=y -> y=x. +intros. +symmetry in H. +assumption. +Qed. +\end{coq_example} + + +\Question{My goal is an equality, how can I prove it by transitivity?} + +Just use the {\transitivity} tactic. +\begin{coq_example} +Goal forall x y z : nat, x=y -> y=z -> x=z. +intros. +transitivity y. +assumption. +assumption. +Qed. +\end{coq_example} + + +\Question{My goal would be solvable using {\tt apply;assumption} if it would not create meta-variables, how can I prove it?} + +You can use {\tt eapply yourtheorem;eauto} but it won't work in all cases ! (for example if more than one hypothesis match one of the subgoals generated by \eapply) so you should rather use {\tt try solve [eapply yourtheorem;eauto]}, otherwise some metavariables may be incorrectly instantiated. + +\begin{coq_example} +Lemma trans : forall x y z : nat, x=y -> y=z -> x=z. +intros. +transitivity y;assumption. +Qed. + +Goal forall x y z : nat, x=y -> y=z -> x=z. +intros. +eapply trans;eauto. +Qed. + +Goal forall x y z t : nat, x=y -> x=t -> y=z -> x=z. +intros. +eapply trans;eauto. +Undo. +eapply trans. +apply H. +auto. +Qed. + +Goal forall x y z t : nat, x=y -> x=t -> y=z -> x=z. +intros. +eapply trans;eauto. +Undo. +try solve [eapply trans;eauto]. +eapply trans. +apply H. +auto. +Qed. 
+ +\end{coq_example} + +\Question{My goal is solvable by some lemma within a set of lemmas and I don't want to remember which one, how can I prove it?} + +You can use a what is called a hints' base. + +\begin{coq_example} +Require Import ZArith. +Require Ring. +Open Local Scope Z_scope. +Lemma toto1 : 1+1 = 2. +ring. +Qed. +Lemma toto2 : 2+2 = 4. +ring. +Qed. +Lemma toto3 : 2+1 = 3. +ring. +Qed. + +Hint Resolve toto1 toto2 toto3 : mybase. + +Goal 2+(1+1)=4. +auto with mybase. +Qed. +\end{coq_example} + + +\Question{My goal is one of the hypotheses, how can I prove it?} + +Use the {\assumption} tactic. + +\begin{coq_example} +Goal 1=1 -> 1=1. +intro. +assumption. +Qed. +\end{coq_example} + + +\Question{My goal appears twice in the hypotheses and I want to choose which one is used, how can I do it?} + +Use the {\exact} tactic. +\begin{coq_example} +Goal 1=1 -> 1=1 -> 1=1. +intros. +exact H0. +Qed. +\end{coq_example} + +\Question{What can be the difference between applying one hypothesis or another in the context of the last question?} + +From a proof point of view it is equivalent but if you want to extract +a program from your proof, the two hypotheses can lead to different +programs. + + +\Question{My goal is a propositional tautology, how can I prove it?} + +Just use the {\tauto} tactic. +\begin{coq_example} +Goal forall A B:Prop, A-> (A\/B) /\ A. +intros. +tauto. +Qed. +\end{coq_example} + +\Question{My goal is a first order formula, how can I prove it?} + +Just use the semi-decision tactic: \firstorder. + +\iffalse +todo: demander un exemple à Pierre +\fi + +\Question{My goal is solvable by a sequence of rewrites, how can I prove it?} + +Just use the {\congruence} tactic. +\begin{coq_example} +Goal forall a b c d e, a=d -> b=e -> c+b=d -> c+e=a. +intros. +congruence. +Qed. +\end{coq_example} + + +\Question{My goal is a disequality solvable by a sequence of rewrites, how can I prove it?} + +Just use the {\congruence} tactic. 
+ +\begin{coq_example} +Goal forall a b c d, a<>d -> b=a -> d=c+b -> b<>c+b. +intros. +congruence. +Qed. +\end{coq_example} + + +\Question{My goal is an equality on some ring (e.g. natural numbers), how can I prove it?} + +Just use the {\ring} tactic. + +\begin{coq_example} +Require Import ZArith. +Require Ring. +Open Local Scope Z_scope. +Goal forall a b : Z, (a+b)*(a+b) = a*a + 2*a*b + b*b. +intros. +ring. +Qed. +\end{coq_example} + +\Question{My goal is an equality on some field (e.g. real numbers), how can I prove it?} + +Just use the {\field} tactic. + +\begin{coq_example} +Require Import Reals. +Require Ring. +Open Local Scope R_scope. +Goal forall a b : R, b*a<>0 -> (a/b) * (b/a) = 1. +intros. +field. +assumption. +Qed. +\end{coq_example} + + +\Question{My goal is an inequality on integers in Presburger's arithmetic (an expression build from +,-,constants and variables), how can I prove it?} + + +\begin{coq_example} +Require Import ZArith. +Require Omega. +Open Local Scope Z_scope. +Goal forall a : Z, a>0 -> a+a > a. +intros. +omega. +Qed. +\end{coq_example} + + +\Question{My goal is an equation solvable using equational hypothesis on some ring (e.g. natural numbers), how can I prove it?} + +You need the {\gb} tactic (see Loïc Pottier's homepage). + +\subsection{Tactics usage} + +\Question{I want to state a fact that I will use later as an hypothesis, how can I do it?} + +If you want to use forward reasoning (first proving the fact and then +using it) you just need to use the {\assert} tactic. If you want to use +backward reasoning (proving your goal using an assumption and then +proving the assumption) use the {\cut} tactic. + +\begin{coq_example} +Goal forall A B C D : Prop, (A -> B) -> (B->C) -> A -> C. +intros. +assert (A->C). +intro;apply H0;apply H;assumption. +apply H2. +assumption. +Qed. + +Goal forall A B C D : Prop, (A -> B) -> (B->C) -> A -> C. +intros. +cut (A->C). +intro. +apply H2;assumption. +intro;apply H0;apply H;assumption. +Qed. 
+\end{coq_example} + + + + +\Question{I want to state a fact that I will use later as an hypothesis and prove it later, how can I do it?} + +You can use {\cut} followed by {\intro} or you can use the following {\Ltac} command: +\begin{verbatim} +Ltac assert_later t := cut t;[intro|idtac]. +\end{verbatim} + +\Question{What is the difference between {\Qed} and {\Defined}?} + +These two commands perform type checking, but when {\Defined} is used the new definition is set as transparent, otherwise it is defined as opaque (see \ref{opaque}). + + +\Question{How can I know what a tactic does?} + +You can use the {\tt info} command. + + + +\Question{Why {\auto} does not work? How can I fix it?} + +You can increase the depth of the proof search or add some lemmas in the base of hints. +Perhaps you may need to use \eauto. + +\Question{What is {\eauto}?} + +This is the same tactic as \auto, but it relies on {\eapply} instead of \apply. + +\iffalse +todo les espaces +\fi + +\Question{How can I speed up {\auto}?} + +You can use \texttt{info }\auto to replace {\auto} by the tactics it generates. +You can split your hint bases into smaller ones. + + +\Question{What is the equivalent of {\tauto} for classical logic?} + +Currently there are no equivalent tactic for classical logic. You can use Gödel's ``not not'' translation. + + +\Question{I want to replace some term with another in the goal, how can I do it?} + +If one of your hypothesis (say {\tt H}) states that the terms are equal you can use the {\rewrite} tactic. Otherwise you can use the {\replace} {\tt with} tactic. + +\Question{I want to replace some term with another in an hypothesis, how can I do it?} + +You can use the {\rewrite} {\tt in} tactic. + +\Question{I want to replace some symbol with its definition, how can I do it?} + +You can use the {\unfold} tactic. + +\Question{How can I reduce some term?} + +You can use the {\simpl} tactic. 
+ +\Question{How can I declare a shortcut for some term?} + +You can use the {\set} or {\pose} tactics. + +\Question{How can I perform case analysis?} + +You can use the {\case} or {\destruct} tactics. + + +\Question{Why should I name my intros?} + +When you use the {\intro} tactic you don't have to give a name to your +hypothesis. If you do so the name will be generated by {\Coq} but your +scripts may be less robust. If you add some hypothesis to your theorem +(or change their order), you will have to change your proof to adapt +to the new names. + +\Question{How can I automatize the naming?} + +You can use the {\tt Show Intro.} or {\tt Show Intros.} commands to generate the names and use your editor to generate a fully named {\intro} tactic. +This can be automatized within {\tt xemacs}. + +\begin{coq_example} +Goal forall A B C : Prop, A -> B -> C -> A/\B/\C. +Show Intros. +(* +A B C H H0 +H1 +*) +intros A B C H H0 H1. +repeat split;assumption. +Qed. +\end{coq_example} + +\Question{I want to automatize the use of some tactic, how can I do it?} + +You need to use the {\tt proof with T} command and add {\ldots} at the +end of your sentences. + +For instance: +\begin{coq_example} +Goal forall A B C : Prop, A -> B/\C -> A/\B/\C. +Proof with assumption. +intros. +split... +Qed. +\end{coq_example} + +\Question{I want to execute the {\texttt proof with} tactic only if it solves the goal, how can I do it?} + +You need to use the {\try} and {\solve} tactics. For instance: +\begin{coq_example} +Require Import ZArith. +Require Ring. +Open Local Scope Z_scope. +Goal forall a b c : Z, a+b=b+a. +Proof with try solve [ring]. +intros... +Qed. +\end{coq_example} + +\Question{How can I do the opposite of the {\intro} tactic?} + +You can use the {\generalize} tactic. + +\begin{coq_example} +Goal forall A B : Prop, A->B-> A/\B. +intros. +generalize H. +intro. +auto. +Qed. 
+\end{coq_example} + +\Question{One of the hypothesis is an equality between a variable and some term, I want to get rid of this variable, how can I do it?} + +You can use the {\subst} tactic. This will rewrite the equality everywhere and clear the assumption. + +\Question{What can I do if I get ``{\tt generated subgoal term has metavariables in it }''?} + +You should use the {\eapply} tactic, this will generate some goals containing metavariables. + +\Question{How can I instantiate some metavariable?} + +Just use the {\instantiate} tactic. + + +\Question{What is the use of the {\pattern} tactic?} + +The {\pattern} tactic transforms the current goal, performing +beta-expansion on all the applications featuring this tactic's +argument. For instance, if the current goal includes a subterm {\tt +phi(t)}, then {\tt pattern t} transforms the subterm into {\tt (fun +x:A => phi(x)) t}. This can be useful when {\apply} fails on matching, +to abstract the appropriate terms. + +\Question{What is the difference between assert, cut and generalize?} + +PS: Notice for people that are interested in proof rendering that \assert +and {\pose} (and \cut) are not rendered the same as {\generalize} (see the +HELM experimental rendering tool at \ahref{http://helm.cs.unibo.it/library.html}{\url{http://helm.cs.unibo.it}}, link +HELM, link COQ Online). Indeed {\generalize} builds a beta-expanded term +while \assert, {\pose} and {\cut} uses a let-in. + +\begin{verbatim} + (* Goal is T *) + generalize (H1 H2). + (* Goal is A->T *) + ... a proof of A->T ... +\end{verbatim} + +is rendered into something like +\begin{verbatim} + (h) ... the proof of A->T ... + we proved A->T + (h0) by (H1 H2) we proved A + by (h h0) we proved T +\end{verbatim} +while +\begin{verbatim} + (* Goal is T *) + assert q := (H1 H2). + (* Goal is A *) + ... a proof of A ... + (* Goal is A |- T *) + ... a proof of T ... +\end{verbatim} +is rendered into something like +\begin{verbatim} + (q) ... the proof of A ... 
+ we proved A
+  ... the proof of T ...
+  we proved T
+\end{verbatim}
+Otherwise said, {\generalize} is not rendered in a forward-reasoning way,
+while {\assert} is.
+
+\Question{What can I do if \Coq cannot infer some implicit argument?}
+
+You can state explicitly what this implicit argument is. See \ref{implicit}.
+
+\Question{How can I give some implicit argument explicitly?}\label{implicit}
+
+Just use \texttt{A:=term} where \texttt{A} is the argument.
+
+For instance if you want to use the existence of ``nil'' on nat*nat lists:
+\begin{verbatim}
+exists (nil (A:=(nat*nat))).
+\end{verbatim}
+
+\iffalse
+\Question{Is there any way to do pattern matching with dependent types?}
+
+todo
+\fi
+
+\subsection{Proof management}
+
+
+\Question{How can I change the order of the subgoals?}
+
+You can use the {\Focus} command to concentrate on some goal. When the goal is proved you will see the remaining goals.
+
+\Question{How can I change the order of the hypotheses?}
+
+You can use the {\tt Move ... after} command.
+
+\Question{How can I change the name of an hypothesis?}
+
+You can use the {\tt Rename ... into} command.
+
+\Question{How can I delete some hypothesis?}
+
+You can use the {\tt Clear} command.
+
+\Question{How can I use a proof which is not finished?}
+
+You can use the {\tt Admitted} command to state your current proof as an axiom.
+
+\Question{How can I state a conjecture?}
+
+You can use the {\tt Admitted} command to state your current proof as an axiom.
+
+\Question{What is the difference between a lemma, a fact and a theorem?}
+
+From the {\Coq} point of view there is no difference. But some tools can
+have a different behavior when you use a lemma rather than a
+theorem. For instance {\tt coqdoc} will not generate documentation for
+the lemmas within your development.
+
+\Question{How can I organize my proofs?}
+
+You can organize your proofs using the section mechanism of \Coq. Have
+a look at the manual for further information.
+ + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Inductive and Co-inductive types} + +\subsection{General} + +\Question{How can I prove that two constructors are different?} + +You can use the {\discriminate} tactic. + +\begin{coq_example} +Inductive toto : Set := | C1 : toto | C2 : toto. +Goal C1 <> C2. +discriminate. +Qed. +\end{coq_example} + +\Question{During an inductive proof, how to get rid of impossible cases of an inductive definition?} + +Use the {\inversion} tactic. + + +\Question{How can I prove that 2 terms in an inductive set are equal? Or different?} + +Have a look at \coqtt{decide equality} and \coqtt{discriminate} in the \ahref{http://coq.inria.fr/doc/main.html}{Reference Manual}. + +\Question{Why is the proof of \coqtt{0+n=n} on natural numbers +trivial but the proof of \coqtt{n+0=n} is not?} + + Since \coqtt{+} (\coqtt{plus}) on natural numbers is defined by analysis on its first argument + +\begin{coq_example} +Print plus. +\end{coq_example} + +{\noindent} The expression \coqtt{0+n} evaluates to \coqtt{n}. As {\Coq} reasons +modulo evaluation of expressions, \coqtt{0+n} and \coqtt{n} are +considered equal and the theorem \coqtt{0+n=n} is an instance of the +reflexivity of equality. On the other side, \coqtt{n+0} does not +evaluate to \coqtt{n} and a proof by induction on \coqtt{n} is +necessary to trigger the evaluation of \coqtt{+}. + +\Question{Why is dependent elimination in Prop not +available by default?} + + +This is just because most of the time it is not needed. To derive a +dependent elimination principle in {\tt Prop}, use the command {\tt Scheme} and +apply the elimination scheme using the \verb=using= option of +\verb=elim=, \verb=destruct= or \verb=induction=. + + +\Question{Argh! 
I cannot write expressions like ``~{\tt if n <= p then p else n}~'', as in any programming language}
+\label{minmax}
+
+The short answer: You should use \texttt{le\_lt\_dec n p} instead.\\
+
+That's right, you can't.
+If you type for instance the following ``definition'':
+\begin{coq_eval}
+Reset Initial.
+\end{coq_eval}
+\begin{coq_example}
+Definition max (n p : nat) := if n <= p then p else n.
+\end{coq_example}
+
+As \Coq~ says, the term ``~\texttt{n <= p}~'' is a proposition, i.e. a
+statement that belongs to the mathematical world. There are many ways to
+prove such a proposition, either by some computation, or using some already
+proven theorems. For instance, proving $3-2 \leq 2^{45503}$ is very easy,
+using some theorems on arithmetical operations. If you compute both numbers
+before comparing them, you risk using a lot of time and space.
+
+
+On the contrary, a function for computing the greatest of two natural numbers
+is an algorithm which, called on two natural numbers
+$n$ and $p$, determines whether $n\leq p$ or $p < n$.
+Such a function is a \emph{decision procedure} for the inequality of
+ \texttt{nat}. The possibility of writing such a procedure comes
+directly from the decidability of the order $\leq$ on natural numbers.
+
+
+When you write a piece of code like
+``~\texttt{if n <= p then \dots{} else \dots}~''
+in a
+programming language like \emph{ML} or \emph{Java}, a call to such a
+decision procedure is generated. The decision procedure is in general
+a primitive function, written in a low-level language, in the correctness
+of which you have to trust.
+
+The standard library of the system \emph{Coq} contains a
+(constructive) proof of decidability of the order $\leq$ on
+\texttt{nat}: the function \texttt{le\_lt\_dec} of
+the module \texttt{Compare\_dec} of library \texttt{Arith}.
+
+The following code shows how to correctly define \texttt{min} and
+\texttt{max}, and prove some properties of these functions.
+
+\begin{coq_example}
+Require Import Compare_dec.
+
+Definition max (n p : nat) := if le_lt_dec n p then p else n.
+
+Definition min (n p : nat) := if le_lt_dec n p then n else p.
+
+Eval compute in (min 4 7).
+
+Theorem min_plus_max : forall n p, min n p + max n p = n + p.
+Proof.
+ intros n p;
+ unfold min, max;
+ case (le_lt_dec n p);
+ simpl; auto with arith.
+Qed.
+
+Theorem max_equiv : forall n p, max n p = p <-> n <= p.
+Proof.
+ unfold max; intros n p; case (le_lt_dec n p);simpl; auto.
+ intuition auto with arith.
+ split.
+ intro e; rewrite e; auto with arith.
+ intro H; absurd (p < p); eauto with arith.
+Qed.
+\end{coq_example}
+
+\Question{I wrote my own decision procedure for $\leq$, which
+is much faster than yours, but proving such theorems as
+ \texttt{max\_equiv} seems to be quite difficult}
+
+Your code is probably the following one:
+
+\begin{coq_example}
+Fixpoint my_le_lt_dec (n p :nat) {struct n}: bool :=
+  match n, p with 0, _ => true
+               | S n', S p' => my_le_lt_dec n' p'
+               | _ , _ => false
+  end.
+
+Definition my_max (n p:nat) := if my_le_lt_dec n p then p else n.
+
+Definition my_min (n p:nat) := if my_le_lt_dec n p then n else p.
+\end{coq_example}
+
+
+For instance, the computation of \texttt{my\_max 567 321} is almost
+immediate, whereas one can't wait for the result of
+\texttt{max 56 32}, using \emph{Coq's} \texttt{le\_lt\_dec}.
+
+This is normal. Your definition is a simple recursive function which
+returns a boolean value. Coq's \texttt{le\_lt\_dec} is a \emph{certified
+function}, i.e. a complex object, able not only to tell whether $n\leq p$
+or $p < n$ holds, but also to build a proof of the corresponding
+statement. In order to relate your fast boolean function to the order
+on \texttt{nat}, you should prove the following theorems:
+
+\begin{coq_example*}
+Theorem my_le_lt_dec_true :
+  forall n p, my_le_lt_dec n p = true <-> n <= p.
+
+Theorem my_le_lt_dec_false :
+  forall n p, my_le_lt_dec n p = false <-> p < n.
+\end{coq_example*}
+
+
+\subsection{Recursion}
+
+\Question{Why can't I define a non terminating program?}
+
+  Because otherwise the decidability of the type-checking
+algorithm (which involves evaluation of programs) is not ensured.
On +another side, if non terminating proofs were allowed, we could get a +proof of {\tt False}: + +\begin{coq_example*} +(* This is fortunately not allowed! *) +Fixpoint InfiniteProof (n:nat) : False := InfiniteProof n. +Theorem Paradox : False. +Proof (InfiniteProof O). +\end{coq_example*} + + +\Question{Why only structurally well-founded loops are allowed?} + + The structural order on inductive types is a simple and +powerful notion of termination. The consistency of the Calculus of +Inductive Constructions relies on it and another consistency proof +would have to be made for stronger termination arguments (such +as the termination of the evaluation of CIC programs themselves!). + +In spite of this, all non-pathological termination orders can be mapped +to a structural order. Tools to do this are provided in the file +\vfile{\InitWf}{Wf} of the standard library of {\Coq}. + +\Question{How to define loops based on non structurally smaller +recursive calls?} + + The procedure is as follows (we consider the definition of {\tt +mergesort} as an example). + +\begin{itemize} + +\item Define the termination order, say {\tt R} on the type {\tt A} of +the arguments of the loop. + +\begin{coq_eval} +Open Scope R_scope. +Require Import List. +\end{coq_eval} + +\begin{coq_example*} +Definition R (a b:list nat) := length a < length b. +\end{coq_example*} + +\item Prove that this order is well-founded (in fact that all elements in {\tt A} are accessible along {\tt R}). + +\begin{coq_example*} +Lemma Rwf : well_founded R. +\end{coq_example*} + +\item Define the step function (which needs proofs that recursive +calls are on smaller arguments). + +\begin{coq_example*} +Definition split (l : list nat) + : {l1: list nat | R l1 l} * {l2 : list nat | R l2 l} + := (* ... *) . +Definition concat (l1 l2 : list nat) : list nat := (* ... *) . 
+Definition merge_step (l : list nat) (f: forall l':list nat, R l' l -> list nat) := + let (lH1,lH2) := (split l) in + let (l1,H1) := lH1 in + let (l2,H2) := lH2 in + concat (f l1 H1) (f l2 H2). +\end{coq_example*} + +\item Define the recursive function by fixpoint on the step function. + +\begin{coq_example*} +Definition merge := Fix Rwf (fun _ => list nat) merge_step. +\end{coq_example*} + +\end{itemize} + +\Question{What is behind the accessibility and well-foundedness proofs?} + + Well-foundedness of some relation {\tt R} on some type {\tt A} +is defined as the accessibility of all elements of {\tt A} along {\tt R}. + +\begin{coq_example} +Print well_founded. +Print Acc. +\end{coq_example} + +The structure of the accessibility predicate is a well-founded tree +branching at each node {\tt x} in {\tt A} along all the nodes {\tt x'} +less than {\tt x} along {\tt R}. Any sequence of elements of {\tt A} +decreasing along the order {\tt R} are branches in the accessibility +tree. Hence any decreasing along {\tt R} is mapped into a structural +decreasing in the accessibility tree of {\tt R}. This is emphasised in +the definition of {\tt fix} which recurs not on its argument {\tt x:A} +but on the accessibility of this argument along {\tt R}. + +See file \vfile{\InitWf}{Wf}. + +\Question{How to perform simultaneous double induction?} + + In general a (simultaneous) double induction is simply solved by an +induction on the first hypothesis followed by an inversion over the +second hypothesis. Here is an example + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{coq_example} +Inductive even : nat -> Prop := + | even_O : even 0 + | even_S : forall n:nat, even n -> even (S (S n)). + +Inductive odd : nat -> Prop := + | odd_SO : odd 1 + | odd_S : forall n:nat, odd n -> odd (S (S n)). + +Lemma not_even_and_odd : forall n:nat, even n -> odd n -> False. +induction 1. + inversion 1. + inversion 1. apply IHeven; trivial. +\end{coq_example} +\begin{coq_eval} +Qed. 
+\end{coq_eval} + +In case the type of the second induction hypothesis is not +dependent, {\tt inversion} can just be replaced by {\tt destruct}. + +\Question{How to define a function by simultaneous double recursion?} + + The same trick applies, you can even use the pattern-matching +compilation algorithm to do the work for you. Here is an example: + +\begin{coq_example} +Fixpoint minus (n m:nat) {struct n} : nat := + match n, m with + | O, _ => 0 + | S k, O => S k + | S k, S l => minus k l + end. +Print minus. +\end{coq_example} + +In case of dependencies in the type of the induction objects +$t_1$ and $t_2$, an extra argument stating $t_1=t_2$ must be given to +the fixpoint definition + +\Question{How to perform nested and double induction?} + + To reason by nested (i.e. lexicographic) induction, just reason by +induction on the successive components. + +\smallskip + +Double induction (or induction on pairs) is a restriction of the +lexicographic induction. Here is an example of double induction. + +\begin{coq_example} +Lemma nat_double_ind : +forall P : nat -> nat -> Prop, P 0 0 -> + (forall m n, P m n -> P m (S n)) -> + (forall m n, P m n -> P (S m) n) -> + forall m n, P m n. +intros P H00 HmS HSn; induction m. +(* case 0 *) +induction n; [assumption | apply HmS; apply IHn]. +(* case Sm *) +intro n; apply HSn; apply IHm. +\end{coq_example} +\begin{coq_eval} +Qed. +\end{coq_eval} + +\Question{How to define a function by nested recursion?} + + The same trick applies. Here is the example of Ackermann +function. + +\begin{coq_example} +Fixpoint ack (n:nat) : nat -> nat := + match n with + | O => S + | S n' => + (fix ack' (m:nat) : nat := + match m with + | O => ack n' 1 + | S m' => ack n' (ack' m') + end) + end. +\end{coq_example} + + +\subsection{Co-inductive types} + +\Question{I have a cofixpoint $t:=F(t)$ and I want to prove $t=F(t)$. How to do it?} + +Just case-expand $F({\tt t})$ then complete by a trivial case analysis. +Here is what it gives on e.g. 
the type of streams on naturals + +\begin{coq_eval} +Set Implicit Arguments. +\end{coq_eval} +\begin{coq_example} +CoInductive Stream (A:Set) : Set := + Cons : A -> Stream A -> Stream A. +CoFixpoint nats (n:nat) : Stream nat := Cons n (nats (S n)). +Lemma Stream_unfold : + forall n:nat, nats n = Cons n (nats (S n)). +Proof. + intro; + change (nats n = match nats n with + | Cons x s => Cons x s + end). + case (nats n); reflexivity. +Qed. +\end{coq_example} + + + +\section{Syntax and notations} + +\Question{I do not want to type ``forall'' because it is too long, what can I do?} + +You can define your own notation for forall: +\begin{verbatim} +Notation "fa x : t, P" := (forall x:t, P) (at level 200, x ident). +\end{verbatim} +or if your are using {\CoqIde} you can define a pretty symbol for for all and an input method (see \ref{forallcoqide}). + + + +\Question{How can I define a notation for square?} + +You can use for instance: +\begin{verbatim} +Notation "x ^2" := (Rmult x x) (at level 20). +\end{verbatim} +Note that you can not use: +\begin{texttt} +Notation "x $^²$" := (Rmult x x) (at level 20). +\end{texttt} +because ``$^2$'' is an iso-latin character. If you really want this kind of notation you should use UTF-8. + + +\Question{Why ``no associativity'' and ``left associativity'' at the same level does not work?} + +Because we relie on camlp4 for syntactical analysis and camlp4 does not really implement no associativity. By default, non associative operators are defined as right associative. + + + +\Question{How can I know the associativity associated with a level?} + +You can do ``Print Grammar constr'', and decode the output from camlp4, good luck ! + +\section{Modules} + + + + +%%%%%%% +\section{\Ltac} + +\Question{What is {\Ltac}?} + +{\Ltac} is the tactic language for \Coq. It provides the user with a +high-level ``toolbox'' for tactic creation. 
+ +\Question{Why do I always get the same error message?} + + +\Question{Is there any printing command in {\Ltac}?} + +You can use the {\idtac} tactic with a string argument. This string +will be printed out. The same applies to the {\fail} tactic + +\Question{What is the syntax for let in {\Ltac}?} + +If $x_i$ are identifiers and $e_i$ and $expr$ are tactic expressions, then let reads: +\begin{center} +{\tt let $x_1$:=$e_1$ with $x_2$:=$e_2$\ldots with $x_n$:=$e_n$ in +$expr$}. +\end{center} +Beware that if $expr$ is complex (i.e. features at least a sequence) parenthesis +should be added around it. For example: +\begin{coq_example} +Ltac twoIntro := let x:=intro in (x;x). +\end{coq_example} + +\Question{What is the syntax for pattern matching in {\Ltac}?} + +Pattern matching on a term $expr$ (non-linear first order unification) +with patterns $p_i$ and tactic expressions $e_i$ reads: +\begin{center} +\hspace{10ex} +{\tt match $expr$ with +\hspace*{2ex}$p_1$ => $e_1$ +\hspace*{1ex}\textbar$p_2$ => $e_2$ +\hspace*{1ex}\ldots +\hspace*{1ex}\textbar$p_n$ => $e_n$ +\hspace*{1ex}\textbar\ \textunderscore\ => $e_{n+1}$ +end. +} +\end{center} +Underscore matches all terms. + +\Question{What is the semantics for ``match goal''?} + +The semantics of {\tt match goal} depends on whether it returns +tactics or not. The {\tt match goal} expression matches the current +goal against a series of patterns: {$hyp_1 {\ldots} hyp_n$ \textbar- +$ccl$}. It uses a first-order unification algorithm and in case of +success, if the right-hand-side is an expression, it tries to type it +while if the right-hand-side is a tactic, it tries to apply it. If the +typing or the tactic application fails, the {\tt match goal} tries all +the possible combinations of $hyp_i$ before dropping the branch and +moving to the next one. Underscore matches all terms. 
+ +\Question{Why can't I use a ``match goal'' returning a tactic in a non +tail-recursive position?} + +This is precisely because the semantics of {\tt match goal} is to +apply the tactic on the right as soon as a pattern unifies what is +meaningful only in tail-recursive uses. + +The semantics in non tail-recursive call could have been the one used +for terms (i.e. fail if the tactic expression is not typable, but +don't try to apply it). For uniformity of semantics though, this has +been rejected. + +\Question{How can I generate a new name?} + +You can use the following syntax: +{\tt let id:=fresh in \ldots}\\ +For example: +\begin{coq_example} +Ltac introIdGen := let id:=fresh in intro id. +\end{coq_example} + + +\iffalse +\Question{How can I access the type of a term?} + +You can use typeof. +todo +\fi + +\Question{How can I define static and dynamic code?} + +\section{Tactics written in Ocaml} + +\Question{Can you show me an example of a tactic written in OCaml?} + +You have some examples of tactics written in Ocaml in the ``contrib'' directory of {\Coq} sources. + + + + +\section{Case studies} + + +\Question{How can I define vectors or lists of size n?} + +\Question{How to prove that 2 sets are different?} + + You need to find a property true on one set and false on the +other one. As an example we show how to prove that {\tt bool} and {\tt +nat} are discriminable. As discrimination property we take the +property to have no more than 2 elements. + +\begin{coq_example*} +Theorem nat_bool_discr : bool <> nat. +Proof. + pose (discr := + fun X:Set => + ~ (forall a b:X, ~ (forall x:X, x <> a -> x <> b -> False))). + intro Heq; assert (H: discr bool). + intro H; apply (H true false); destruct x; auto. + rewrite Heq in H; apply H; clear H. + destruct a; destruct b as [|n]; intro H0; eauto. + destruct n; [ apply (H0 2); discriminate | eauto ]. +Qed. 
+\end{coq_example*} + +\Question{Is there an axiom-free proof of Streicher's axiom $K$ for +the equality on {\tt nat}?} +\label{K-nat} + +Yes, because equality is decidable on {\tt nat}. Here is the proof. + +\begin{coq_example*} +Require Import Eqdep_dec. +Require Import Peano_dec. +Theorem K_nat : + forall (x:nat) (P:x = x -> Prop), P (refl_equal x) -> forall p:x = x, P p. +Proof. +intros; apply K_dec_set with (p := p). +apply eq_nat_dec. +assumption. +Qed. +\end{coq_example*} + +Similarly, we have + +\begin{coq_example*} +Theorem eq_rect_eq_nat : + forall (p:nat) (Q:nat->Type) (x:Q p) (h:p=p), x = eq_rect p Q x p h. +Proof. +intros; apply K_nat with (p := h); reflexivity. +Qed. +\end{coq_example*} + +\Question{How to prove that two proofs of {\tt n<=m} on {\tt nat} are equal?} +\label{le-uniqueness} + +This is provable without requiring any axiom because axiom $K$ +directly holds on {\tt nat}. Here is a proof using question \ref{K-nat}. + +\begin{coq_example*} +Require Import Arith. +Scheme le_ind' := Induction for le Sort Prop. +Theorem le_uniqueness_proof : forall (n m : nat) (p q : n <= m), p = q. +Proof. +induction p using le_ind'; intro q. + replace (le_n n) with + (eq_rect _ (fun n0 => n <= n0) (le_n n) _ (refl_equal n)). + 2:reflexivity. + generalize (refl_equal n). + pattern n at 2 4 6 10, q; case q; [intro | intros m l e]. + rewrite <- eq_rect_eq_nat; trivial. + contradiction (le_Sn_n m); rewrite <- e; assumption. + replace (le_S n m p) with + (eq_rect _ (fun n0 => n <= n0) (le_S n m p) _ (refl_equal (S m))). + 2:reflexivity. + generalize (refl_equal (S m)). + pattern (S m) at 1 3 4 6, q; case q; [intro Heq | intros m0 l HeqS]. + contradiction (le_Sn_n m); rewrite Heq; assumption. + injection HeqS; intro Heq; generalize l HeqS. + rewrite <- Heq; intros; rewrite <- eq_rect_eq_nat. + rewrite (IHp l0); reflexivity. +Qed. 
+\end{coq_example*} + +\Question{How to exploit equalities on sets} + +To extract information from an equality on sets, you need to +find a predicate of sets satisfied by the elements of the sets. As an +example, let's consider the following theorem. + +\begin{coq_example*} +Theorem interval_discr : + forall m n:nat, + {x : nat | x <= m} = {x : nat | x <= n} -> m = n. +\end{coq_example*} + +We have a proof requiring the axiom of proof-irrelevance. We +conjecture that proof-irrelevance can be circumvented by introducing a +primitive definition of discrimination of the proofs of +\verb!{x : nat | x <= m}!. + +\begin{latexonly}% +The proof can be found in file {\tt interval$\_$discr.v} in this directory. +%Here is the proof +%\begin{small} +%\begin{flushleft} +%\begin{texttt} +%\def_{\ifmmode\sb\else\subscr\fi} +%\include{interval_discr.v} +%%% WARNING semantics of \_ has changed ! +%\end{texttt} +%$a\_b\_c$ +%\end{flushleft} +%\end{small} +\end{latexonly}% +\begin{htmlonly}% +\ahref{./interval_discr.v}{Here} is the proof. +\end{htmlonly} + +\Question{I have a problem of dependent elimination on +proofs, how to solve it?} + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{coq_example*} +Inductive Def1 : Set := c1 : Def1. +Inductive DefProp : Def1 -> Prop := + c2 : forall d:Def1, DefProp d. +Inductive Comb : Set := + c3 : forall d:Def1, DefProp d -> Comb. +Lemma eq_comb : + forall (d1 d1':Def1) (d2:DefProp d1) (d2':DefProp d1'), + d1 = d1' -> c3 d1 d2 = c3 d1' d2'. +\end{coq_example*} + + You need to derive the dependent elimination +scheme for DefProp by hand using {\coqtt Scheme}. + +\begin{coq_eval} +Abort. +\end{coq_eval} + +\begin{coq_example*} +Scheme DefProp_elim := Induction for DefProp Sort Prop. +Lemma eq_comb : + forall d1 d1':Def1, + d1 = d1' -> + forall (d2:DefProp d1) (d2':DefProp d1'), c3 d1 d2 = c3 d1' d2'. +intros. +destruct H. +destruct d2 using DefProp_elim. +destruct d2' using DefProp_elim. +reflexivity. +Qed. 
+\end{coq_example*} + + +\Question{And what if I want to prove the following?} + +\begin{coq_example*} +Inductive natProp : nat -> Prop := + | p0 : natProp 0 + | pS : forall n:nat, natProp n -> natProp (S n). +Inductive package : Set := + pack : forall n:nat, natProp n -> package. +Lemma eq_pack : + forall n n':nat, + n = n' -> + forall (np:natProp n) (np':natProp n'), pack n np = pack n' np'. +\end{coq_example*} + + + +\begin{coq_eval} +Abort. +\end{coq_eval} +\begin{coq_example*} +Scheme natProp_elim := Induction for natProp Sort Prop. +Definition pack_S : package -> package. +destruct 1. +apply (pack (S n)). +apply pS; assumption. +Defined. +Lemma eq_pack : + forall n n':nat, + n = n' -> + forall (np:natProp n) (np':natProp n'), pack n np = pack n' np'. +intros n n' Heq np np'. +generalize dependent n'. +induction np using natProp_elim. +induction np' using natProp_elim; intros; auto. + discriminate Heq. +induction np' using natProp_elim; intros; auto. + discriminate Heq. +change (pack_S (pack n np) = pack_S (pack n0 np')). +apply (f_equal (A:=package)). +apply IHnp. +auto. +Qed. +\end{coq_example*} + + + + + + + +\section{Publishing tools} + +\Question{How can I generate some latex from my development?} + +You can use {\tt coqdoc}. + +\Question{How can I generate some HTML from my development?} + +You can use {\tt coqdoc}. + +\Question{How can I generate some dependency graph from my development?} + +\Question{How can I cite some {\Coq} in my latex document?} + +You can use {\tt coq\_tex}. 
+ +\Question{How can I cite the {\Coq} reference manual?} + +You can use this bibtex entry: +\begin{verbatim} +@Manual{Coq:manual, + title = {The Coq proof assistant reference manual}, + author = {\mbox{The Coq development team}}, + organization = {LogiCal Project}, + note = {Version 8.0}, + year = {2004}, + url = "http://coq.inria.fr" +} +\end{verbatim} + +\Question{Where can I publish my developments in {\Coq}?} + +You can submit your developments as a user contribution to the {\Coq} +development team. This ensures its liveness along the evolution and +possible changes of {\Coq}. + +You can also submit your developments to the HELM/MoWGLI repository at +the University of Bologna (see +\ahref{http://mowgli.cs.unibo.it}{\url{http://mowgli.cs.unibo.it}}). For +developments submitted in this database, it is possible to visualize +the developments in natural language and execute various retrieving +requests. + +\Question{How can I read my proof in natural language?} + +You can submit your proof to the HELM/MoWGLI repository and use the +rendering tool provided by the server (see +\ahref{http://mowgli.cs.unibo.it}{\url{http://mowgli.cs.unibo.it}}). + +\section{\CoqIde} + +\Question{What is {\CoqIde}?} + +{\CoqIde} is a gtk based GUI for \Coq. + +\Question{How to enable Emacs keybindings?} + Insert \texttt{gtk-key-theme-name = "Emacs"} + in your \texttt{.coqide-gtk2rc} file. It may be in the current dir + or in \verb#$HOME# dir. This is done by default. + +%$ juste pour que la coloration emacs marche + +\Question{How to enable antialiased fonts?} + + Set the \verb#GDK_USE_XFT# variable to \verb#1#. This is by default with \verb#Gtk >= 2.2#. + If some of your fonts are not available, set \verb#GDK_USE_XFT# to \verb#0#. 
+ +\Question{How to use those Forall and Exists pretty symbols?}\label{forallcoqide} + Thanks to the notation features in \Coq, you just need to insert these +lines in your {\Coq} buffer:\\ +\begin{texttt} +Notation "$\forall$ x : t, P" := (forall x:t, P) (at level 200, x ident). +\end{texttt}\\ +\begin{texttt} +Notation "$\exists$ x : t, P" := (exists x:t, P) (at level 200, x ident). +\end{texttt} + +Copy/Paste of these lines from this file will not work outside of \CoqIde. +You need to load a file containing these lines or to enter the $\forall$ +using an input method (see \ref{inputmeth}). To try it just use \verb#Require Import utf8# from inside +\CoqIde. +To enable these notations automatically start coqide with +\begin{verbatim} + coqide -l utf8 +\end{verbatim} +In the ide subdir of {\Coq} library, you will find a sample utf8.v with some +pretty simple notations. + +\Question{How to define an input method for non ASCII symbols?}\label{inputmeth} + +\begin{itemize} +\item First solution: type \verb#2200# to enter a forall in the script widow. + 2200 is the hexadecimal code for forall in unicode charts and is encoded as + in UTF-8. + 2203 is for exists. See \ahref{http://www.unicode.org}{\url{http://www.unicode.org}} for more codes. +\item Second solution: rebind \verb#a# to forall and \verb#e# to exists. + Under X11, you need to use something like +\begin{verbatim} + xmodmap -e "keycode 24 = a A F13 F13" + xmodmap -e "keycode 26 = e E F14 F14" +\end{verbatim} + and then to add +\begin{verbatim} + bind "F13" {"insert-at-cursor" ("")} + bind "F14" {"insert-at-cursor" ("")} +\end{verbatim} + to your "binding "text"" section in \verb#.coqiderc-gtk2rc.# + The strange ("") argument is the UTF-8 encoding for + 0x2200. + You can compute these encodings using the lablgtk2 toplevel with +\begin{verbatim} +Glib.Utf8.from_unichar 0x2200;; +\end{verbatim} + Further symbols can be bound on higher Fxx keys or on even on other keys you + do not need . 
+\end{itemize}
+
+\Question{How to build a custom {\CoqIde} with user ML code?}
+ Use
+ coqmktop -ide -byte m1.cmo...mi.cmo
+ or
+ coqmktop -ide -opt m1.cmx...mi.cmx
+
+\Question{How to customize the shortcuts for menus?}
+ Two solutions are offered:
+\begin{itemize}
+\item Edit \$HOME/.coqide.keys by hand or
+\item Add "gtk-can-change-accels = 1" in your .coqide-gtk2rc file. Then
+ from \CoqIde, you may select a menu entry and press the desired
+ shortcut.
+\end{itemize}
+
+\Question{What encoding should I use? What is this $\backslash$x\{iiii\} in my file?}
+ The encoding option is related to the way files are saved.
+ Keep it as UTF-8 until it becomes important for you to exchange files
+ with non UTF-8 aware applications.
+ If you choose something other than UTF-8, then missing characters will
+ be encoded by $\backslash$x\{....\} or $\backslash$x\{........\}
+ where each dot is a hexadecimal digit.
+ The number between braces is the hexadecimal UNICODE index for the
+ missing character.
+
+
+
+
+\section{Extraction}
+
+\Question{What is program extraction?}
+
+Program extraction consists in generating a program from a constructive proof.
+
+\Question{Which language can I extract to?}
+
+You can extract your programs to Objective Caml and Haskell.
+
+\Question{How can I extract an incomplete proof?}
+
+You can provide programs for your axioms.
+
+
+
+%%%%%%%
+\section{Glossary}
+
+\Question{Can you explain to me what an evaluable constant is?}
+
+An evaluable constant is a constant which is unfoldable.
+
+\Question{What is a goal?}
+
+The goal is the statement to be proved.
+
+\Question{What is a meta variable?}
+
+A meta variable in {\Coq} represents a ``hole'', i.e. a part of a proof
+that is still unknown.
+
+\Question{What is Gallina?}
+
+Gallina is the specification language of \Coq. Complete documentation
+of this language can be found in the Reference Manual.
+
+\Question{What is The Vernacular?}
+
+It is the language of commands of Gallina, i.e.
definitions, lemmas, {\ldots}
+
+
+\Question{What is a dependent type?}
+
+A dependent type is a type which depends on some term. For instance
+``vector of size n'' is a dependent type representing all the vectors
+of size $n$. Its type depends on $n$.
+
+\Question{What is a proof by reflection?}
+
+This is a proof generated by some computation which is done using the
+internal reduction of {\Coq} (not using the tactic language of {\Coq}
+(\Ltac) nor the implementation language for \Coq). An example of a
+tactic using the reflection mechanism is the {\ring} tactic. The
+reflection method consists in reflecting a subset of the {\Coq} language (for
+example the arithmetical expressions) into an object of the \Coq
+language itself (in this case an inductive type denoting arithmetical
+expressions). For more information see~\cite{howe,harrison,boutin}
+and the last chapter of the Coq'Art.
+
+\Question{What is intuitionistic logic?}
+
+This is any logic which does not assume that ``A or not A''.
+
+
+\Question{What is proof-irrelevance?}
+
+See question \ref{proof-irrelevance}.
+
+
+\Question{What is the difference between opaque and transparent?}{\label{opaque}}
+
+Opaque definitions cannot be unfolded but transparent ones can.
+
+
+\section{Troubleshooting}
+
+\Question{What can I do when {\tt Qed.} is slow?}
+
+Sometimes you can use the {\abstracttac} tactic, which makes as if you had
+stated some local lemma; this speeds up the typing process.
+
+\Question{Why does \texttt{Reset Initial.} not work when using \texttt{coqc}?}
+
+The initial state corresponds to the state of coqtop when the interactive
+session began. It does not make sense in files to compile.
+
+
+\Question{What can I do if I get ``No more subgoals but non-instantiated existential variables''?}
+
+This means that {\eauto} or {\eapply} didn't instantiate an
+existential variable which eventually got erased by some computation.
+You have to backtrack to the faulty occurrence of {\eauto} or
+{\eapply} and give the missing argument an explicit value.
+
+\Question{What can I do if I get ``Cannot solve a second-order unification problem''?}
+
+You can help {\Coq} using the {\pattern} tactic.
+
+\Question{Why does {\Coq} tell me that \texttt{\{x:A|(P x)\}} is not convertible with \texttt{(sig A P)}?}
+
+ This is because \texttt{\{x:A|P x\}} is a notation for
+\texttt{sig (fun x:A => P x)}. Since {\Coq} does not reason up to
+$\eta$-conversion, this is different from \texttt{sig P}.
+
+
+\Question{I copy-paste a term and {\Coq} says it is not convertible
+ to the original term. Sometimes it even says the copied term is not
+well-typed.}
+
+ This is probably due to invisible implicit information (implicit
+arguments, coercions and Cases annotations) in the printed term, which
+is not re-synthesised from the copy-pasted term in the same way as
+it is in the original term.
+
+ Consider for instance {\tt (@eq Type True True)}. This term is
+printed as {\tt True=True} and re-parsed as {\tt (@eq Prop True
+True)}. The two terms are not convertible (hence they fool tactics
+like {\tt pattern}).
+
+ There is currently no satisfactory answer to the problem. However,
+the command {\tt Set Printing All} is useful for diagnosing the
+problem.
+
+ Due to coercions, one may even face type-checking errors. In some
+rare cases, the criterion to hide coercions is a bit too loose, which
+may result in a typing error message if the parser is not able to find
+again the missing coercion.
+
+
+
+\section{Conclusion and Farewell.}
+\label{ccl}
+
+\Question{What if my question isn't answered here?}
+\label{lastquestion}
+
+Don't panic \verb+:-)+. You can try the {\Coq} manual~\cite{Coq:manual} for a technical
+description of the prover. The Coq'Art~\cite{Coq:coqart} is the first
+book written on {\Coq} and provides a comprehensive review of the
+theorem prover as well as a number of examples and exercises.
Finally, +the tutorial~\cite{Coq:Tutorial} provides a smooth introduction to +theorem proving in \Coq. + + +%%%%%%% +\newpage +\nocite{LaTeX:intro} +\nocite{LaTeX:symb} +\bibliography{fk} + +%%%%%%% +\typeout{*********************************************} +\typeout{********* That makes \thequestion{\space} questions **********} +\typeout{*********************************************} + +\end{document} diff --git a/doc/faq/axioms.eps b/doc/faq/axioms.eps new file mode 100644 index 00000000..3f3c01c4 --- /dev/null +++ b/doc/faq/axioms.eps @@ -0,0 +1,378 @@ +%!PS-Adobe-2.0 EPSF-2.0 +%%Title: axioms.fig +%%Creator: fig2dev Version 3.2 Patchlevel 4 +%%CreationDate: Wed May 5 18:30:03 2004 +%%For: herbelin@limoux.polytechnique.fr (Hugo Herbelin) +%%BoundingBox: 0 0 437 372 +%%Magnification: 1.0000 +%%EndComments +/$F2psDict 200 dict def +$F2psDict begin +$F2psDict /mtrx matrix put +/col-1 {0 setgray} bind def +/col0 {0.000 0.000 0.000 srgb} bind def +/col1 {0.000 0.000 1.000 srgb} bind def +/col2 {0.000 1.000 0.000 srgb} bind def +/col3 {0.000 1.000 1.000 srgb} bind def +/col4 {1.000 0.000 0.000 srgb} bind def +/col5 {1.000 0.000 1.000 srgb} bind def +/col6 {1.000 1.000 0.000 srgb} bind def +/col7 {1.000 1.000 1.000 srgb} bind def +/col8 {0.000 0.000 0.560 srgb} bind def +/col9 {0.000 0.000 0.690 srgb} bind def +/col10 {0.000 0.000 0.820 srgb} bind def +/col11 {0.530 0.810 1.000 srgb} bind def +/col12 {0.000 0.560 0.000 srgb} bind def +/col13 {0.000 0.690 0.000 srgb} bind def +/col14 {0.000 0.820 0.000 srgb} bind def +/col15 {0.000 0.560 0.560 srgb} bind def +/col16 {0.000 0.690 0.690 srgb} bind def +/col17 {0.000 0.820 0.820 srgb} bind def +/col18 {0.560 0.000 0.000 srgb} bind def +/col19 {0.690 0.000 0.000 srgb} bind def +/col20 {0.820 0.000 0.000 srgb} bind def +/col21 {0.560 0.000 0.560 srgb} bind def +/col22 {0.690 0.000 0.690 srgb} bind def +/col23 {0.820 0.000 0.820 srgb} bind def +/col24 {0.500 0.190 0.000 srgb} bind def +/col25 {0.630 0.250 0.000 srgb} bind def 
+/col26 {0.750 0.380 0.000 srgb} bind def +/col27 {1.000 0.500 0.500 srgb} bind def +/col28 {1.000 0.630 0.630 srgb} bind def +/col29 {1.000 0.750 0.750 srgb} bind def +/col30 {1.000 0.880 0.880 srgb} bind def +/col31 {1.000 0.840 0.000 srgb} bind def + +end +save +newpath 0 372 moveto 0 0 lineto 437 0 lineto 437 372 lineto closepath clip newpath +-90.0 435.2 translate +1 -1 scale + +/cp {closepath} bind def +/ef {eofill} bind def +/gr {grestore} bind def +/gs {gsave} bind def +/sa {save} bind def +/rs {restore} bind def +/l {lineto} bind def +/m {moveto} bind def +/rm {rmoveto} bind def +/n {newpath} bind def +/s {stroke} bind def +/sh {show} bind def +/slc {setlinecap} bind def +/slj {setlinejoin} bind def +/slw {setlinewidth} bind def +/srgb {setrgbcolor} bind def +/rot {rotate} bind def +/sc {scale} bind def +/sd {setdash} bind def +/ff {findfont} bind def +/sf {setfont} bind def +/scf {scalefont} bind def +/sw {stringwidth} bind def +/tr {translate} bind def +/tnt {dup dup currentrgbcolor + 4 -2 roll dup 1 exch sub 3 -1 roll mul add + 4 -2 roll dup 1 exch sub 3 -1 roll mul add + 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb} + bind def +/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul + 4 -2 roll mul srgb} bind def +/reencdict 12 dict def /ReEncode { reencdict begin +/newcodesandnames exch def /newfontname exch def /basefontname exch def +/basefontdict basefontname findfont def /newfont basefontdict maxlength dict def +basefontdict { exch dup /FID ne { dup /Encoding eq +{ exch dup length array copy newfont 3 1 roll put } +{ exch newfont 3 1 roll put } ifelse } { pop pop } ifelse } forall +newfont /FontName newfontname put newcodesandnames aload pop +128 1 255 { newfont /Encoding get exch /.notdef put } for +newcodesandnames length 2 idiv { newfont /Encoding get 3 1 roll put } repeat +newfontname newfont definefont pop end } def +/isovec [ +8#055 /minus 8#200 /grave 8#201 /acute 8#202 /circumflex 8#203 /tilde +8#204 /macron 8#205 /breve 8#206 
/dotaccent 8#207 /dieresis +8#210 /ring 8#211 /cedilla 8#212 /hungarumlaut 8#213 /ogonek 8#214 /caron +8#220 /dotlessi 8#230 /oe 8#231 /OE +8#240 /space 8#241 /exclamdown 8#242 /cent 8#243 /sterling +8#244 /currency 8#245 /yen 8#246 /brokenbar 8#247 /section 8#250 /dieresis +8#251 /copyright 8#252 /ordfeminine 8#253 /guillemotleft 8#254 /logicalnot +8#255 /hyphen 8#256 /registered 8#257 /macron 8#260 /degree 8#261 /plusminus +8#262 /twosuperior 8#263 /threesuperior 8#264 /acute 8#265 /mu 8#266 /paragraph +8#267 /periodcentered 8#270 /cedilla 8#271 /onesuperior 8#272 /ordmasculine +8#273 /guillemotright 8#274 /onequarter 8#275 /onehalf +8#276 /threequarters 8#277 /questiondown 8#300 /Agrave 8#301 /Aacute +8#302 /Acircumflex 8#303 /Atilde 8#304 /Adieresis 8#305 /Aring +8#306 /AE 8#307 /Ccedilla 8#310 /Egrave 8#311 /Eacute +8#312 /Ecircumflex 8#313 /Edieresis 8#314 /Igrave 8#315 /Iacute +8#316 /Icircumflex 8#317 /Idieresis 8#320 /Eth 8#321 /Ntilde 8#322 /Ograve +8#323 /Oacute 8#324 /Ocircumflex 8#325 /Otilde 8#326 /Odieresis 8#327 /multiply +8#330 /Oslash 8#331 /Ugrave 8#332 /Uacute 8#333 /Ucircumflex +8#334 /Udieresis 8#335 /Yacute 8#336 /Thorn 8#337 /germandbls 8#340 /agrave +8#341 /aacute 8#342 /acircumflex 8#343 /atilde 8#344 /adieresis 8#345 /aring +8#346 /ae 8#347 /ccedilla 8#350 /egrave 8#351 /eacute +8#352 /ecircumflex 8#353 /edieresis 8#354 /igrave 8#355 /iacute +8#356 /icircumflex 8#357 /idieresis 8#360 /eth 8#361 /ntilde 8#362 /ograve +8#363 /oacute 8#364 /ocircumflex 8#365 /otilde 8#366 /odieresis 8#367 /divide +8#370 /oslash 8#371 /ugrave 8#372 /uacute 8#373 /ucircumflex +8#374 /udieresis 8#375 /yacute 8#376 /thorn 8#377 /ydieresis] def +/Times-Roman /Times-Roman-iso isovec ReEncode +/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def +/$F2psEnd {$F2psEnteredState restore end} def + +$F2psBegin +10 setmiterlimit +0 slj 0 slc + 0.06000 0.06000 sc +% +% Fig objects follow +% +% +% here starts figure with depth 50 +% Arc +7.500 slw +gs clippath 
+3599 6933 m 3626 6879 l 3492 6812 l 3586 6893 l 3465 6865 l cp +eoclip +n 3600.0 6750.0 150.0 90.0 -90.0 arc +gs col0 s gr + gr + +% arrowhead +n 3465 6865 m 3586 6893 l 3492 6812 l 3465 6865 l cp gs 0.00 setgray ef gr col0 s +% Arc +gs clippath +3599 6633 m 3626 6579 l 3492 6512 l 3586 6593 l 3465 6565 l cp +eoclip +n 3600.0 6450.0 150.0 90.0 -90.0 arc +gs col0 s gr + gr + +% arrowhead +n 3465 6565 m 3586 6593 l 3492 6512 l 3465 6565 l cp gs 0.00 setgray ef gr col0 s +% Arc +gs clippath +3626 6020 m 3599 5966 l 3465 6034 l 3586 6007 l 3492 6087 l cp +3599 6333 m 3626 6279 l 3492 6212 l 3586 6293 l 3465 6265 l cp +eoclip +n 3600.0 6150.0 150.0 90.0 -90.0 arc +gs col0 s gr + gr + +% arrowhead +n 3492 6087 m 3586 6007 l 3465 6034 l 3492 6087 l cp gs 0.00 setgray ef gr col0 s +% arrowhead +n 3465 6265 m 3586 6293 l 3492 6212 l 3465 6265 l cp gs 0.00 setgray ef gr col0 s +% Arc +gs clippath +3626 6320 m 3599 6266 l 3465 6334 l 3586 6307 l 3492 6387 l cp +3599 6633 m 3626 6579 l 3492 6512 l 3586 6593 l 3465 6565 l cp +eoclip +n 3600.0 6450.0 150.0 90.0 -90.0 arc +gs col0 s gr + gr + +% arrowhead +n 3492 6387 m 3586 6307 l 3465 6334 l 3492 6387 l cp gs 0.00 setgray ef gr col0 s +% arrowhead +n 3465 6565 m 3586 6593 l 3492 6512 l 3465 6565 l cp gs 0.00 setgray ef gr col0 s +% Arc +gs clippath +3626 6620 m 3599 6566 l 3465 6634 l 3586 6607 l 3492 6687 l cp +3599 6933 m 3626 6879 l 3492 6812 l 3586 6893 l 3465 6865 l cp +eoclip +n 3600.0 6750.0 150.0 90.0 -90.0 arc +gs col0 s gr + gr + +% arrowhead +n 3492 6687 m 3586 6607 l 3465 6634 l 3492 6687 l cp gs 0.00 setgray ef gr col0 s +% arrowhead +n 3465 6865 m 3586 6893 l 3492 6812 l 3465 6865 l cp gs 0.00 setgray ef gr col0 s +% Arc +gs clippath +3626 6920 m 3599 6866 l 3465 6934 l 3586 6907 l 3492 6987 l cp +3599 7233 m 3626 7179 l 3492 7112 l 3586 7193 l 3465 7165 l cp +eoclip +n 3600.0 7050.0 150.0 90.0 -90.0 arc +gs col0 s gr + gr + +% arrowhead +n 3492 6987 m 3586 6907 l 3465 6934 l 3492 6987 l cp gs 0.00 setgray ef gr 
col0 s +% arrowhead +n 3465 7165 m 3586 7193 l 3492 7112 l 3465 7165 l cp gs 0.00 setgray ef gr col0 s +% Arc +gs clippath +4168 4060 m 4227 4068 l 4247 3919 l 4202 4034 l 4188 3911 l cp +eoclip +n 14032.5 5272.5 9908.2 -159.9 -172.9 arcn +gs col0 s gr + gr + +% arrowhead +n 4188 3911 m 4202 4034 l 4247 3919 l 4188 3911 l cp gs 0.00 setgray ef gr col0 s +% Polyline +gs clippath +4170 5790 m 4230 5790 l 4230 5639 l 4200 5759 l 4170 5639 l cp +eoclip +n 4200 5175 m + 4200 5775 l gs col0 s gr gr + +% arrowhead +n 4170 5639 m 4200 5759 l 4230 5639 l 4170 5639 l cp gs 0.00 setgray ef gr col0 s +% Polyline +gs clippath +4553 5749 m 4567 5807 l 4714 5771 l 4591 5771 l 4700 5713 l cp +eoclip +n 7050 5175 m + 4575 5775 l gs col0 s gr gr + +% arrowhead +n 4700 5713 m 4591 5771 l 4714 5771 l 4700 5713 l cp gs 0.00 setgray ef gr col0 s +% Polyline +gs clippath +4170 4890 m 4230 4890 l 4230 4739 l 4200 4859 l 4170 4739 l cp +eoclip +n 4200 4275 m + 4200 4875 l gs col0 s gr gr + +% arrowhead +n 4170 4739 m 4200 4859 l 4230 4739 l 4170 4739 l cp gs 0.00 setgray ef gr col0 s +% Polyline +gs clippath +7131 4907 m 7147 4850 l 7001 4810 l 7109 4871 l 6985 4868 l cp +eoclip +n 4950 4275 m + 7125 4875 l gs col0 s gr gr + +% arrowhead +n 6985 4868 m 7109 4871 l 7001 4810 l 6985 4868 l cp gs 0.00 setgray ef gr col0 s +% Polyline +gs clippath +7167 4057 m 7225 4071 l 7262 3924 l 7204 4034 l 7204 3910 l cp +eoclip +n 7725 1950 m + 7200 4050 l gs col0 s gr gr + +% arrowhead +n 7204 3910 m 7204 4034 l 7262 3924 l 7204 3910 l cp gs 0.00 setgray ef gr col0 s +% Polyline +n 4350 3075 m + 7350 1950 l gs col0 s gr +% Polyline +gs clippath +7170 4890 m 7230 4890 l 7230 4739 l 7200 4859 l 7170 4739 l cp +eoclip +n 7200 4275 m + 7200 4875 l gs col0 s gr gr + +% arrowhead +n 7170 4739 m 7200 4859 l 7230 4739 l 7170 4739 l cp gs 0.00 setgray ef gr col0 s +% Polyline +n 3075 1875 m + 3975 1875 l gs col0 s gr +% Polyline +gs clippath +5520 4065 m 5580 4065 l 5580 3914 l 5550 4034 l 5520 3914 l cp +5580 
3660 m 5520 3660 l 5520 3811 l 5550 3691 l 5580 3811 l cp +eoclip +n 5550 3675 m + 5550 4050 l gs col0 s gr gr + +% arrowhead +n 5580 3811 m 5550 3691 l 5520 3811 l 5580 3811 l cp gs 0.00 setgray ef gr col0 s +% arrowhead +n 5520 3914 m 5550 4034 l 5580 3914 l 5520 3914 l cp gs 0.00 setgray ef gr col0 s +% Polyline +n 4575 4050 m + 6450 4050 l gs col0 s gr +% Polyline +gs clippath +3495 2265 m 3555 2265 l 3555 2114 l 3525 2234 l 3495 2114 l cp +3555 1860 m 3495 1860 l 3495 2011 l 3525 1891 l 3555 2011 l cp +eoclip +n 3525 1875 m + 3525 2250 l gs col0 s gr gr + +% arrowhead +n 3555 2011 m 3525 1891 l 3495 2011 l 3555 2011 l cp gs 0.00 setgray ef gr col0 s +% arrowhead +n 3495 2114 m 3525 2234 l 3555 2114 l 3495 2114 l cp gs 0.00 setgray ef gr col0 s +% Polyline +gs clippath +2219 3988 m 2279 3991 l 2285 3840 l 2251 3959 l 2225 3838 l cp +eoclip +n 2325 1875 m + 2250 3975 l gs col0 s gr gr + +% arrowhead +n 2225 3838 m 2251 3959 l 2285 3840 l 2225 3838 l cp gs 0.00 setgray ef gr col0 s +% Polyline +n 7800 1275 m + 2100 1275 l gs col0 s gr +/Times-Roman-iso ff 180.00 scf sf +6600 5100 m +gs 1 -1 sc (Proof-irrelevance) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3675 4200 m +gs 1 -1 sc (Excluded-middle) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +6900 1800 m +gs 1 -1 sc (Predicate extensionality) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3375 3525 m +gs 1 -1 sc (\(Diaconescu\)) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +4650 3600 m +gs 1 -1 sc (Propositional degeneracy) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3825 1800 m +gs 1 -1 sc (Relational choice axiom) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +1725 1800 m +gs 1 -1 sc (Description principle) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +2550 2400 m +gs 1 -1 sc (Functional choice axiom) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3600 5100 m +gs 1 -1 sc (Decidability of equality on $A$) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +4425 4575 m +gs 1 -1 sc (\(needs 
Prop-impredicativity\)) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +5025 4725 m +gs 1 -1 sc (\(Berardi\)) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +1500 3075 m +gs 1 -1 sc (\(if Set impredicative\)) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +1500 4200 m +gs 1 -1 sc (Not excluded-middle) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3600 6000 m +gs 1 -1 sc (Axiom K on A) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3600 7200 m +gs 1 -1 sc (Invariance by substitution of reflexivity proofs for equality on A) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +6150 4200 m +gs 1 -1 sc (Propositional extensionality) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +2100 1200 m +gs 1 -1 sc (The dependency graph of axioms in the Calculus of Inductive Constructions) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3600 6900 m +gs 1 -1 sc (Injectivity of equality on sigma-types on A) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3600 6300 m +gs 1 -1 sc (Uniqueness of reflexivity proofs for equality on A) col0 sh gr +/Times-Roman-iso ff 180.00 scf sf +3600 6600 m +gs 1 -1 sc (Uniqueness of equality proofs on A) col0 sh gr +% here ends figure; +$F2psEnd +rs +showpage diff --git a/doc/faq/axioms.fig b/doc/faq/axioms.fig new file mode 100644 index 00000000..f0775930 --- /dev/null +++ b/doc/faq/axioms.fig @@ -0,0 +1,84 @@ +#FIG 3.2 +Landscape +Center +Inches +Letter +100.00 +Single +-2 +1200 2 +5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 1 3600.000 6750.000 3600 6900 3450 6750 3600 6600 + 1 1 1.00 60.00 120.00 +5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 1 3600.000 6450.000 3600 6600 3450 6450 3600 6300 + 1 1 1.00 60.00 120.00 +5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 6150.000 3600 6300 3450 6150 3600 6000 + 1 1 1.00 60.00 120.00 + 1 1 1.00 60.00 120.00 +5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 6450.000 3600 6600 3450 6450 3600 6300 + 1 1 1.00 60.00 120.00 + 1 1 1.00 60.00 120.00 +5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 6750.000 3600 6900 3450 6750 3600 6600 + 1 1 1.00 60.00 120.00 + 1 
1 1.00 60.00 120.00 +5 1 0 1 0 7 50 -1 -1 0.000 0 0 1 1 3600.000 7050.000 3600 7200 3450 7050 3600 6900 + 1 1 1.00 60.00 120.00 + 1 1 1.00 60.00 120.00 +5 1 0 1 0 7 50 -1 -1 0.000 0 1 1 0 14032.500 5272.500 4725 1875 4425 2850 4200 4050 + 1 1 1.00 60.00 120.00 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 4200 5175 4200 5775 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 7050 5175 4575 5775 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 4200 4275 4200 4875 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 4950 4275 7125 4875 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 7725 1950 7200 4050 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 + 4350 3075 7350 1950 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 7200 4275 7200 4875 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 + 3075 1875 3975 1875 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 + 1 1 1.00 60.00 120.00 + 1 1 1.00 60.00 120.00 + 5550 3675 5550 4050 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 + 4575 4050 6450 4050 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2 + 1 1 1.00 60.00 120.00 + 1 1 1.00 60.00 120.00 + 3525 1875 3525 2250 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2 + 1 1 1.00 60.00 120.00 + 2325 1875 2250 3975 +2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2 + 7800 1275 2100 1275 +4 0 0 50 -1 0 12 0.0000 4 135 1305 6600 5100 Proof-irrelevance\001 +4 0 0 50 -1 0 12 0.0000 4 135 1260 3675 4200 Excluded-middle\001 +4 0 0 50 -1 0 12 0.0000 4 180 1830 6900 1800 Predicate extensionality\001 +4 0 0 50 -1 0 12 0.0000 4 180 1050 3375 3525 (Diaconescu)\001 +4 0 0 50 -1 0 12 0.0000 4 180 1905 4650 3600 Propositional degeneracy\001 +4 0 0 50 -1 0 12 0.0000 4 135 1800 3825 1800 Relational choice axiom\001 +4 0 0 50 -1 0 12 0.0000 4 180 1575 1725 1800 Description principle\001 +4 0 0 50 -1 0 12 0.0000 4 135 1830 2550 2400 Functional choice axiom\001 +4 0 0 50 -1 0 12 0.0000 4 195 2340 3600 5100 Decidability of equality on $A$\001 +4 0 0 
50 -1 0 12 0.0000 4 180 2175 4425 4575 (needs Prop-impredicativity)\001 +4 0 0 50 -1 0 12 0.0000 4 180 705 5025 4725 (Berardi)\001 +4 0 0 50 -1 0 12 0.0000 4 180 1620 1500 3075 (if Set impredicative)\001 +4 0 0 50 -1 0 12 0.0000 4 135 1560 1500 4200 Not excluded-middle\001 +4 0 0 50 -1 0 12 0.0000 4 135 1080 3600 6000 Axiom K on A\001 +4 0 0 50 -1 0 12 0.0000 4 180 4800 3600 7200 Invariance by substitution of reflexivity proofs for equality on A\001 +4 0 0 50 -1 0 12 0.0000 4 180 2100 6150 4200 Propositional extensionality\001 +4 0 0 50 -1 0 12 0.0000 4 180 5700 2100 1200 The dependency graph of axioms in the Calculus of Inductive Constructions\001 +4 0 0 50 -1 0 12 0.0000 4 180 3210 3600 6900 Injectivity of equality on sigma-types on A\001 +4 0 0 50 -1 0 12 0.0000 4 180 3735 3600 6300 Uniqueness of reflexivity proofs for equality on A\001 +4 0 0 50 -1 0 12 0.0000 4 180 2670 3600 6600 Uniqueness of equality proofs on A\001 diff --git a/doc/faq/axioms.png b/doc/faq/axioms.png new file mode 100644 index 00000000..2aee0916 Binary files /dev/null and b/doc/faq/axioms.png differ diff --git a/doc/faq/fk.bib b/doc/faq/fk.bib new file mode 100644 index 00000000..d41ab7f0 --- /dev/null +++ b/doc/faq/fk.bib @@ -0,0 +1,2221 @@ +%%%%%%% FAQ %%%%%%% + +@book{ProofsTypes, + Author="Girard, Jean-Yves and Yves Lafont and Paul Taylor", + Title="Proofs and Types", + Publisher="Cambrige Tracts in Theoretical Computer Science, Cambridge University Press", + Year="1989" +} + +@misc{Types:Dowek, + author = "Gilles Dowek", + title = "Th{\'e}orie des types", + year = 2002, + howpublished = "Lecture notes", + url= "http://www.lix.polytechnique.fr/~dowek/Cours/theories_des_types.ps.gz" +} + +@PHDTHESIS{EGThese, + author = {Eduardo Giménez}, + title = {Un Calcul de Constructions Infinies et son application +a la vérification de systèmes communicants}, + type = {thèse d'Université}, + school = {Ecole Normale Supérieure de Lyon}, + month = {December}, + year = {1996}, +} + + +%%%%%%% 
Semantique %%%%%%% + +@misc{Sem:cours, + author = "François Pottier", + title = "{Typage et Programmation}", + year = "2002", + howpublished = "Lecture notes", + note = "DEA PSPL" +} + +@inproceedings{Sem:Dubois, + author = {Catherine Dubois}, + editor = {Mark Aagaard and + John Harrison}, + title = "{Proving ML Type Soundness Within Coq}", + pages = {126-144}, + booktitle = {TPHOLs}, + publisher = {Springer}, + series = {Lecture Notes in Computer Science}, + volume = {1869}, + year = {2000}, + isbn = {3-540-67863-8}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@techreport{Sem:Plotkin, +author = {Gordon D. Plotkin}, +institution = {Aarhus University}, +number = {{DAIMI FN-19}}, +title = {{A structural approach to operational semantics}}, +year = {1981} +} + +@article{Sem:RemyV98, + author = "Didier R{\'e}my and J{\'e}r{\^o}me Vouillon", + title = "Objective {ML}: + An effective object-oriented extension to {ML}", + journal = "Theory And Practice of Object Systems", + year = 1998, + volume = "4", + number = "1", + pages = "27--50", + note = {A preliminary version appeared in the proceedings + of the 24th ACM Conference on Principles + of Programming Languages, 1997} +} + +@book{Sem:Winskel, + AUTHOR = {Winskel, Glynn}, + TITLE = {The Formal Semantics of Programming Languages}, + NOTE = {WIN g2 93:1 P-Ex}, + YEAR = {1993}, + PUBLISHER = {The MIT Press}, + SERIES = {Foundations of Computing}, + } + +@Article{Sem:WrightFelleisen, + refkey = "C1210", + title = "A Syntactic Approach to Type Soundness", + author = "Andrew K. Wright and Matthias Felleisen", + pages = "38--94", + journal = "Information and Computation", + month = "15~" # nov, + year = "1994", + volume = "115", + number = "1" +} + +@inproceedings{Sem:Nipkow-MOD, + author={Tobias Nipkow}, + title={Jinja: Towards a Comprehensive Formal Semantics for a + {J}ava-like Language}, + booktitle={Proc.\ Marktobderdorf Summer School 2003}, + publisher={IOS Press},editor={H. Schwichtenberg and K. 
Spies}, + year=2003, + note={To appear} +} + +%%%%%%% Coq %%%%%%% + +@book{Coq:coqart, + title = "Interactive Theorem Proving and Program Development, + Coq'Art: The Calculus of Inductive Constructions", + author = "Yves Bertot and Pierre Castéran", + publisher = "Springer Verlag", + series = "Texts in Theoretical Computer Science. An + EATCS series", + year = 2004 +} + +@phdthesis{Coq:Del01, + AUTHOR = "David Delahaye", + TITLE = "Conception de langages pour décrire les preuves et les + automatisations dans les outils d'aide à la preuve", + SCHOOL = {Universit\'e Paris~6}, + YEAR = "2001", + Type = {Th\`ese de Doctorat} +} + +@techreport{Coq:gimenez-tut, + author = "Eduardo Gim\'enez", + title = "A Tutorial on Recursive Types in Coq", + number = "RT-0221", + pages = "42 p.", + url = "citeseer.nj.nec.com/gimenez98tutorial.html" } + +@phdthesis{Coq:Mun97, + AUTHOR = "César Mu{\~{n}}oz", + TITLE = "Un calcul de substitutions pour la repr\'esentation + de preuves partielles en th\'eorie de types", + SCHOOL = {Universit\'e Paris~7}, + Number = {Unit\'e de recherche INRIA-Rocquencourt, TU-0488}, + YEAR = "1997", + Note = {English version available as INRIA research report RR-3309}, + Type = {Th\`ese de Doctorat} +} + +@PHDTHESIS{Coq:Filliatre99, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Preuve de programmes imp\'eratifs en th\'eorie des types}}, + TYPE = {Th{\`e}se de Doctorat}, + SCHOOL = {Universit\'e Paris-Sud}, + YEAR = 1999, + MONTH = {July}, +} + +@manual{Coq:Tutorial, + AUTHOR = {G\'erard Huet and Gilles Kahn and Christine Paulin-Mohring}, + TITLE = {{The Coq Proof Assistant A Tutorial}}, + YEAR = 2004 +} + +%%%%%%% PVS %%%%%%% + +@manual{PVS:prover, + title = "{PVS} Prover Guide", + author = "N. Shankar and S. Owre and J. M. Rushby and D. W. J. 
+ Stringer-Calvert", + month = sep, + year = "1999", + organization = "Computer Science Laboratory, SRI International", + address = "Menlo Park, CA", +} + +@techreport{PVS-Semantics:TR, + TITLE = {The Formal Semantics of {PVS}}, + AUTHOR = {Sam Owre and Natarajan Shankar}, + NUMBER = {CR-1999-209321}, + INSTITUTION = {Computer Science Laboratory, SRI International}, + ADDRESS = {Menlo Park, CA}, + MONTH = may, + YEAR = 1999, +} + +@techreport{PVS-Tactics:DiVito, + TITLE = {A {PVS} Prover Strategy Package for Common Manipulations}, + AUTHOR = {Ben L. Di Vito}, + NUMBER = {TM-2002-211647}, + INSTITUTION = {Langley Research Center}, + ADDRESS = {Hampton, VA}, + MONTH = apr, + YEAR = 2002, +} + +@misc{PVS-Tactics:cours, + author = "César Muñoz", + title = "Strategies in {PVS}", + howpublished = "Lecture notes", + note = "National Institute of Aerospace", + year = 2002 +} + +@techreport{PVS-Tactics:field, + author = "C. Mu{\~n}oz and M. Mayero", + title = "Real Automation in the Field", + institution = "ICASE-NASA Langley", + number = "NASA/CR-2001-211271 Interim ICASE Report No. 39", + month = "dec", + year = "2001" +} + +%%%%%%% Autres Prouveurs %%%%%%% + +@misc{ACL2:repNuPrl, + author = "James L. Caldwell and John Cowles", + title = "{Representing Nuprl Proof Objects in ACL2: toward a proof checker for Nuprl}", + url = "http://www.cs.uwyo.edu/~jlc/papers/proof_checking.ps" } + +@inproceedings{Elan:ckl-strat, + author = {H. Cirstea and C. Kirchner and L. Liquori}, + title = "{Rewrite Strategies in the Rewriting Calculus}", + booktitle = {WRLA'02}, + publisher = "{Elsevier Science B.V.}", + series = {Electronic Notes in Theoretical Computer Science}, + volume = {71}, + year = {2003}, +} + +@book{LCF:GMW, + author = {M. Gordon and R. Milner and C. 
Wadsworth}, + publisher = {sv}, + series = {lncs}, + volume = 78, + title = {Edinburgh {LCF}: A Mechanized Logic of Computation}, + year = 1979 +} + +%%%%%%% LaTeX %%%%%%% + +@manual{LaTeX:symb, + title = "The Great, Big List of \LaTeX\ Symbols", + author = "David Carlisle and Scott Pakin and Alexander Holt", + month = feb, + year = 2001, +} + +@manual{LaTeX:intro, + title = "The Not So Short Introduction to \LaTeX2e", + author = "Tobias Oetiker", + month = jan, + year = 1999, +} + +@MANUAL{CoqManualV7, + AUTHOR = {{The {Coq} Development Team}}, + TITLE = {{The Coq Proof Assistant Reference Manual -- Version + V7.1}}, + YEAR = {2001}, + MONTH = OCT, + NOTE = {http://coq.inria.fr} +} + +@MANUAL{CoqManual96, + TITLE = {The {Coq Proof Assistant Reference Manual} Version 6.1}, + AUTHOR = {B. Barras and S. Boutin and C. Cornes and J. Courant and + J.-C. Filli\^atre and + H. Herbelin and G. Huet and P. Manoury and C. Mu{\~{n}}oz and + C. Murthy and C. Parent and C. Paulin-Mohring and + A. Sa{\"\i}bi and B. Werner}, + ORGANIZATION = {{INRIA-Rocquencourt}-{CNRS-ENS Lyon}}, + URL = {ftp://ftp.inria.fr/INRIA/coq/V6.1/doc/Reference-Manual.dvi.gz}, + YEAR = 1996, + MONTH = DEC +} + +@MANUAL{CoqTutorial99, + AUTHOR = {G.~Huet and G.~Kahn and Ch.~Paulin-Mohring}, + TITLE = {The {\sf Coq} Proof Assistant - A tutorial - Version 6.3}, + MONTH = JUL, + YEAR = {1999}, + ABSTRACT = {http://coq.inria.fr/doc/tutorial.html} +} + +@MANUAL{CoqTutorialV7, + AUTHOR = {G.~Huet and G.~Kahn and Ch.~Paulin-Mohring}, + TITLE = {The {\sf Coq} Proof Assistant - A tutorial - Version 7.1}, + MONTH = OCT, + YEAR = {2001}, + NOTE = {http://coq.inria.fr} +} + +@TECHREPORT{modelpa2000, + AUTHOR = {B. Bérard and P. Castéran and E. Fleury and L. Fribourg + and J.-F. Monin and C. Paulin and A. Petit and D. 
Rouillard}, + TITLE = {Automates temporisés CALIFE}, + INSTITUTION = {Calife}, + YEAR = 2000, + URL = {http://www.loria.fr/projets/calife/WebCalifePublic/FOURNITURES/F1.1.ps.gz}, + TYPE = {Fourniture {F1.1}} +} + +@TECHREPORT{CaFrPaRo2000, + AUTHOR = {P. Castéran and E. Freund and C. Paulin and D. Rouillard}, + TITLE = {Bibliothèques Coq et Isabelle-HOL pour les systèmes de transitions et les p-automates}, + INSTITUTION = {Calife}, + YEAR = 2000, + URL = {http://www.loria.fr/projets/calife/WebCalifePublic/FOURNITURES/F5.4.ps.gz}, + TYPE = {Fourniture {F5.4}} +} + +@PROCEEDINGS{TPHOLs99, + TITLE = {International Conference on + Theorem Proving in Higher Order Logics (TPHOLs'99)}, + YEAR = 1999, + EDITOR = {Y. Bertot and G. Dowek and C. Paulin-Mohring and L. Th{\'e}ry}, + SERIES = {Lecture Notes in Computer Science}, + MONTH = SEP, + PUBLISHER = {{Sprin\-ger-Verlag}}, + ADDRESS = {Nice}, + TYPE_PUBLI = {editeur} +} + +@INPROCEEDINGS{Pau01, + AUTHOR = {Christine Paulin-Mohring}, + TITLE = {Modelisation of Timed Automata in {Coq}}, + BOOKTITLE = {Theoretical Aspects of Computer Software (TACS'2001)}, + PAGES = {298--315}, + YEAR = 2001, + EDITOR = {N. Kobayashi and B. Pierce}, + VOLUME = 2215, + SERIES = {Lecture Notes in Computer Science}, + PUBLISHER = {Springer-Verlag} +} + +@PHDTHESIS{Moh89b, + AUTHOR = {C. Paulin-Mohring}, + MONTH = JAN, + SCHOOL = {{Paris 7}}, + TITLE = {Extraction de programmes dans le {Calcul des Constructions}}, + TYPE = {Thèse d'université}, + YEAR = {1989}, + URL = {http://www.lri.fr/~paulin/these.ps.gz} +} + +@ARTICLE{HuMo92, + AUTHOR = {G. Huet and C. Paulin-Mohring}, + EDITION = {INRIA}, + JOURNAL = {Courrier du CNRS - Informatique}, + TITLE = {Preuves et Construction de Programmes}, + YEAR = {1992}, + CATEGORY = {national} +} + +@INPROCEEDINGS{LePa94, + AUTHOR = {F. Leclerc and C. Paulin-Mohring}, + TITLE = {Programming with Streams in {Coq}. A case study : The Sieve of Eratosthenes}, + EDITOR = {H. Barendregt and T. 
Nipkow}, + VOLUME = 806, + SERIES = {Lecture Notes in Computer Science}, + BOOKTITLE = {{Types for Proofs and Programs, Types' 93}}, + YEAR = 1994, + PUBLISHER = {Springer-Verlag} +} + +@INPROCEEDINGS{Moh86, + AUTHOR = {C. Mohring}, + ADDRESS = {Cambridge, MA}, + BOOKTITLE = {Symposium on Logic in Computer Science}, + PUBLISHER = {IEEE Computer Society Press}, + TITLE = {Algorithm Development in the {Calculus of Constructions}}, + YEAR = {1986} +} + +@INPROCEEDINGS{Moh89a, + AUTHOR = {C. Paulin-Mohring}, + ADDRESS = {Austin}, + BOOKTITLE = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, + MONTH = JAN, + PUBLISHER = {ACM}, + TITLE = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, + YEAR = {1989} +} + +@INCOLLECTION{Moh89c, + AUTHOR = {C. Paulin-Mohring}, + TITLE = {{R\'ealisabilit\'e et extraction de programmes}}, + BOOKTITLE = {Logique et Informatique : une introduction}, + PUBLISHER = {INRIA}, + YEAR = 1991, + EDITOR = {B. Courcelle}, + VOLUME = 8, + SERIES = {Collection Didactique}, + PAGES = {163-180}, + CATEGORY = {national} +} + +@INPROCEEDINGS{Moh93, + AUTHOR = {C. Paulin-Mohring}, + BOOKTITLE = {Proceedings of the conference Typed Lambda Calculi and Applications}, + EDITOR = {M. Bezem and J.-F. Groote}, + INSTITUTION = {LIP-ENS Lyon}, + NOTE = {LIP research report 92-49}, + NUMBER = 664, + SERIES = {Lecture Notes in Computer Science}, + TITLE = {{Inductive Definitions in the System {Coq} - Rules and Properties}}, + TYPE = {research report}, + YEAR = 1993 +} + +@ARTICLE{PaWe92, + AUTHOR = {C. Paulin-Mohring and B. Werner}, + JOURNAL = {Journal of Symbolic Computation}, + TITLE = {{Synthesis of ML programs in the system Coq}}, + VOLUME = {15}, + YEAR = {1993}, + PAGES = {607--640} +} + +@INPROCEEDINGS{Pau96, + AUTHOR = {C. Paulin-Mohring}, + TITLE = {Circuits as streams in {Coq} : Verification of a sequential multiplier}, + BOOKTITLE = {Types for Proofs and Programs, TYPES'95}, + EDITOR = {S. 
Berardi and M. Coppo}, + SERIES = {Lecture Notes in Computer Science}, + YEAR = 1996, + VOLUME = 1158 +} + +@PHDTHESIS{Pau96b, + AUTHOR = {Christine Paulin-Mohring}, + TITLE = {Définitions Inductives en Théorie des Types d'Ordre Supérieur}, + SCHOOL = {Université Claude Bernard Lyon I}, + YEAR = 1996, + MONTH = DEC, + TYPE = {Habilitation à diriger les recherches}, + URL = {http://www.lri.fr/~paulin/habilitation.ps.gz} +} + +@INPROCEEDINGS{PfPa89, + AUTHOR = {F. Pfenning and C. Paulin-Mohring}, + BOOKTITLE = {Proceedings of Mathematical Foundations of Programming Semantics}, + NOTE = {technical report CMU-CS-89-209}, + PUBLISHER = {Springer-Verlag}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = 442, + TITLE = {Inductively defined types in the {Calculus of Constructions}}, + YEAR = {1990} +} + +@MISC{krakatoa02, + AUTHOR = {Claude March\'e and Christine Paulin and Xavier Urbain}, + TITLE = {The \textsc{Krakatoa} proof tool}, + YEAR = 2002, + NOTE = {\url{http://krakatoa.lri.fr/}} +} + +@ARTICLE{marche03jlap, + AUTHOR = {Claude March{\'e} and Christine Paulin-Mohring and Xavier Urbain}, + TITLE = {The \textsc{Krakatoa} Tool for Certification of \textsc{Java/JavaCard} Programs annotated in \textsc{JML}}, + JOURNAL = {Journal of Logic and Algebraic Programming}, + YEAR = 2003, + NOTE = {To appear}, + URL = {http://krakatoa.lri.fr}, + TOPICS = {team} +} +@ARTICLE{marche04jlap, + AUTHOR = {Claude March{\'e} and Christine Paulin-Mohring and Xavier Urbain}, + TITLE = {The \textsc{Krakatoa} Tool for Certification of \textsc{Java/JavaCard} Programs annotated in \textsc{JML}}, + JOURNAL = {Journal of Logic and Algebraic Programming}, + YEAR = 2004, + VOLUME = 58, + NUMBER = {1--2}, + PAGES = {89--106}, + URL = {http://krakatoa.lri.fr}, + TOPICS = {team} +} + +@TECHREPORT{catano03deliv, + AUTHOR = {N{\'e}stor Cata{\~n}o and Marek Gawkowski and +Marieke Huisman and Bart Jacobs and Claude March{\'e} and Christine Paulin +and Erik Poll and Nicole Rauch and Xavier 
Urbain}, + TITLE = {Logical Techniques for Applet Verification}, + INSTITUTION = {VerifiCard Project}, + YEAR = 2003, + TYPE = {Deliverable}, + NUMBER = {5.2}, + TOPICS = {team}, + NOTE = {Available from \url{http://www.verificard.org}} +} + +@TECHREPORT{kmu2002rr, + AUTHOR = {Keiichirou Kusakari and Claude Marché and Xavier Urbain}, + TITLE = {Termination of Associative-Commutative Rewriting using Dependency Pairs Criteria}, + INSTITUTION = {LRI}, + YEAR = 2002, + TYPE = {Research Report}, + NUMBER = 1304, + TYPE_PUBLI = {interne}, + TOPICS = {team}, + NOTE = {\url{http://www.lri.fr/~urbain/textes/rr1304.ps.gz}}, + URL = {http://www.lri.fr/~urbain/textes/rr1304.ps.gz} +} + +@ARTICLE{marche2004jsc, + AUTHOR = {Claude March\'e and Xavier Urbain}, + TITLE = {Modular {\&} Incremental Proofs of {AC}-Termination}, + JOURNAL = {Journal of Symbolic Computation}, + YEAR = 2004, + TOPICS = {team} +} + +@INPROCEEDINGS{contejean03wst, + AUTHOR = {Evelyne Contejean and Claude Marché and Benjamin Monate and Xavier Urbain}, + TITLE = {{Proving Termination of Rewriting with {\sc C\textit{i}ME}}}, + CROSSREF = {wst03}, + PAGES = {71--73}, + NOTE = {\url{http://cime.lri.fr/}}, + URL = {http://cime.lri.fr/}, + YEAR = 2003, + TYPE_PUBLI = {icolcomlec}, + TOPICS = {team} +} + +@TECHREPORT{contejean04rr, + AUTHOR = {Evelyne Contejean and Claude March{\'e} and Ana-Paula Tom{\'a}s and Xavier Urbain}, + TITLE = {Mechanically proving termination using polynomial interpretations}, + INSTITUTION = {LRI}, + YEAR = {2004}, + TYPE = {Research Report}, + NUMBER = {1382}, + TYPE_PUBLI = {interne}, + TOPICS = {team}, + URL = {http://www.lri.fr/~urbain/textes/rr1382.ps.gz} +} + +@UNPUBLISHED{duran_sub, + AUTHOR = {Francisco Duran and Salvador Lucas and + Claude {March\'e} and {Jos\'e} Meseguer and Xavier Urbain}, + TITLE = {Termination of Membership Equational Programs}, + NOTE = {Submitted} +} + +@PROCEEDINGS{comon95lncs, + TITLE = {Term Rewriting}, + BOOKTITLE = {Term Rewriting}, + TOPICS = 
{team, cclserver}, + YEAR = 1995, + EDITOR = {Hubert Comon and Jean-Pierre Jouannaud}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = {909}, + PUBLISHER = {{Sprin\-ger-Verlag}}, + ORGANIZATION = {French Spring School of Theoretical Computer + Science}, + TYPE_PUBLI = {editeur}, + CLEF_LABO = {CJ95} +} + +@PROCEEDINGS{lics94, + TITLE = {Proceedings of the Ninth Annual IEEE Symposium on Logic + in Computer Science}, + BOOKTITLE = {Proceedings of the Ninth Annual IEEE Symposium on Logic + in Computer Science}, + YEAR = 1994, + MONTH = JUL, + ADDRESS = {Paris, France}, + ORGANIZATION = {{IEEE} Comp. Soc. Press} +} + +@PROCEEDINGS{rta91, + TITLE = {4th International Conference on Rewriting Techniques and + Applications}, + BOOKTITLE = {4th International Conference on Rewriting Techniques and + Applications}, + EDITOR = {Ronald. V. Book}, + YEAR = 1991, + MONTH = APR, + ADDRESS = {Como, Italy}, + PUBLISHER = {{Sprin\-ger-Verlag}}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = 488 +} + +@PROCEEDINGS{rta96, + TITLE = {7th International Conference on Rewriting Techniques and + Applications}, + BOOKTITLE = {7th International Conference on Rewriting Techniques and + Applications}, + EDITOR = {Harald Ganzinger}, + PUBLISHER = {{Sprin\-ger-Verlag}}, + YEAR = 1996, + MONTH = JUL, + ADDRESS = {New Brunswick, NJ, USA}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = 1103 +} + +@PROCEEDINGS{rta97, + TITLE = {8th International Conference on Rewriting Techniques and + Applications}, + BOOKTITLE = {8th International Conference on Rewriting Techniques and + Applications}, + EDITOR = {Hubert Comon}, + PUBLISHER = {{Sprin\-ger-Verlag}}, + YEAR = 1997, + MONTH = JUN, + ADDRESS = {Barcelona, Spain}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = {1232} +} + +@PROCEEDINGS{rta98, + TITLE = {9th International Conference on Rewriting Techniques and + Applications}, + BOOKTITLE = {9th International Conference on Rewriting Techniques and + 
Applications}, + EDITOR = {Tobias Nipkow}, + PUBLISHER = {{Sprin\-ger-Verlag}}, + YEAR = 1998, + MONTH = APR, + ADDRESS = {Tsukuba, Japan}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = {1379} +} + +@PROCEEDINGS{rta00, + TITLE = {11th International Conference on Rewriting Techniques and Applications}, + BOOKTITLE = {11th International Conference on Rewriting Techniques and Applications}, + EDITOR = {Leo Bachmair}, + PUBLISHER = {{Sprin\-ger-Verlag}}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = 1833, + MONTH = JUL, + YEAR = 2000, + ADDRESS = {Norwich, UK} +} + +@PROCEEDINGS{srt95, + TITLE = {Proceedings of the Conference on Symbolic Rewriting + Techniques}, + BOOKTITLE = {Proceedings of the Conference on Symbolic Rewriting + Techniques}, + YEAR = 1995, + EDITOR = {Manuel Bronstein and Volker Weispfenning}, + ADDRESS = {Monte Verita, Switzerland} +} + +@BOOK{comon01cclbook, + BOOKTITLE = {Constraints in Computational Logics}, + TITLE = {Constraints in Computational Logics}, + EDITOR = {Hubert Comon and Claude March{\'e} and Ralf Treinen}, + YEAR = 2001, + PUBLISHER = {{Sprin\-ger-Verlag}}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = 2002, + TOPICS = {team}, + TYPE_PUBLI = {editeur} +} + +@PROCEEDINGS{wst03, + BOOKTITLE = {{Extended Abstracts of the 6th International Workshop on Termination, WST'03}}, + TITLE = {{Extended Abstracts of the 6th International Workshop on Termination, WST'03}}, + YEAR = {2003}, + EDITOR = {Albert Rubio}, + MONTH = JUN, + NOTE = {Technical Report DSIC II/15/03, Universidad Politécnica de Valencia, Spain} +} + +@INPROCEEDINGS{FilliatreLetouzey03, + AUTHOR = {J.-C. Filli\^atre and P. 
Letouzey}, + TITLE = {{Functors for Proofs and Programs}}, + BOOKTITLE = {Proceedings of The European Symposium on Programming}, + YEAR = 2004, + ADDRESS = {Barcelona, Spain}, + MONTH = {March 29-April 2}, + NOTE = {To appear}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/fpp.ps.gz} +} + +@TECHREPORT{Filliatre03, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Why: a multi-language multi-prover verification tool}}, + INSTITUTION = {{LRI, Universit\'e Paris Sud}}, + TYPE = {{Research Report}}, + NUMBER = {1366}, + MONTH = {March}, + YEAR = 2003, + URL = {http://www.lri.fr/~filliatr/ftp/publis/why-tool.ps.gz} +} + +@ARTICLE{FilliatrePottier02, + AUTHOR = {J.-C. Filli{\^a}tre and F. Pottier}, + TITLE = {{Producing All Ideals of a Forest, Functionally}}, + JOURNAL = {Journal of Functional Programming}, + VOLUME = 13, + NUMBER = 5, + PAGES = {945--956}, + MONTH = {September}, + YEAR = 2003, + URL = {http://www.lri.fr/~filliatr/ftp/publis/kr-fp.ps.gz}, + ABSTRACT = { + We present a functional implementation of Koda and Ruskey's + algorithm for generating all ideals of a forest poset as a Gray + code. Using a continuation-based approach, we give an extremely + concise formulation of the algorithm's core. Then, in a number of + steps, we derive a first-order version whose efficiency is + comparable to a C implementation given by Knuth.} +} + +@UNPUBLISHED{FORS01, + AUTHOR = {J.-C. Filli{\^a}tre and S. Owre and H. Rue{\ss} and N. Shankar}, + TITLE = {Deciding Propositional Combinations of Equalities and Inequalities}, + NOTE = {Unpublished}, + MONTH = OCT, + YEAR = 2001, + URL = {http://www.lri.fr/~filliatr/ftp/publis/ics.ps}, + ABSTRACT = { + We address the problem of combining individual decision procedures + into a single decision procedure. Our combination approach is based + on using the canonizer obtained from Shostak's combination algorithm + for equality. 
We illustrate our approach with a combination + algorithm for equality, disequality, arithmetic inequality, and + propositional logic. Unlike the Nelson--Oppen combination where the + processing of equalities is distributed across different closed + decision procedures, our combination involves the centralized + processing of equalities in a single procedure. The termination + argument for the combination is based on that for Shostak's + algorithm. We also give soundness and completeness arguments.} +} + +@INPROCEEDINGS{ICS, + AUTHOR = {J.-C. Filli{\^a}tre and S. Owre and H. Rue{\ss} and N. Shankar}, + TITLE = {{ICS: Integrated Canonization and Solving (Tool presentation)}}, + BOOKTITLE = {Proceedings of CAV'2001}, + EDITOR = {G. Berry and H. Comon and A. Finkel}, + PUBLISHER = {Springer-Verlag}, + SERIES = {Lecture Notes in Computer Science}, + VOLUME = 2102, + PAGES = {246--249}, + YEAR = 2001 +} + +@INPROCEEDINGS{Filliatre01a, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {La supériorité de l'ordre supérieur}, + BOOKTITLE = {Journées Francophones des Langages Applicatifs}, + PAGES = {15--26}, + MONTH = {Janvier}, + YEAR = 2002, + ADDRESS = {Anglet, France}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/sos.ps.gz}, + CODE = {http://www.lri.fr/~filliatr/ftp/ocaml/misc/koda-ruskey.ps}, + ABSTRACT = { + Nous présentons ici une écriture fonctionnelle de l'algorithme de + Koda-Ruskey, un algorithme pour engendrer une large famille + de codes de Gray. En s'inspirant de techniques de programmation par + continuation, nous aboutissons à un code de neuf lignes seulement, + bien plus élégant que les implantations purement impératives + proposées jusqu'ici, notamment par Knuth. Dans un second temps, + nous montrons comment notre code peut être légèrement modifié pour + aboutir à une version de complexité optimale. + Notre implantation en Objective Caml rivalise d'efficacité avec les + meilleurs codes C. 
Nous détaillons les calculs de complexité, + un exercice intéressant en présence d'ordre supérieur et d'effets de + bord combinés.} +} + +@TECHREPORT{Filliatre00c, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Design of a proof assistant: Coq version 7}}, + INSTITUTION = {{LRI, Universit\'e Paris Sud}}, + TYPE = {{Research Report}}, + NUMBER = {1369}, + MONTH = {October}, + YEAR = 2000, + URL = {http://www.lri.fr/~filliatr/ftp/publis/coqv7.ps.gz}, + ABSTRACT = { + We present the design and implementation of the new version of the + Coq proof assistant. The main novelty is the isolation of the + critical part of the system, which consists in a type checker for + the Calculus of Inductive Constructions. This kernel is now + completely independent of the rest of the system and has been + rewritten in a purely functional way. This leads to greater clarity + and safety, without compromising efficiency. It also opens the way to + the ``bootstrap'' of the Coq system, where the kernel will be + certified using Coq itself.} +} + +@TECHREPORT{Filliatre00b, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Hash consing in an ML framework}}, + INSTITUTION = {{LRI, Universit\'e Paris Sud}}, + TYPE = {{Research Report}}, + NUMBER = {1368}, + MONTH = {September}, + YEAR = 2000, + URL = {http://www.lri.fr/~filliatr/ftp/publis/hash-consing.ps.gz}, + ABSTRACT = { + Hash consing is a technique to share values that are structurally + equal. Beyond the obvious advantage of saving memory blocks, hash + consing may also be used to gain speed in several operations (like + equality test) and data structures (like sets or maps) when sharing is + maximal. However, physical adresses cannot be used directly for this + purpose when the garbage collector is likely to move blocks + underneath. We present an easy solution in such a framework, with + many practical benefits.} +} + +@MISC{ocamlweb, + AUTHOR = {J.-C. Filli\^atre and C. 
March\'e}, + TITLE = {{ocamlweb, a literate programming tool for Objective Caml}}, + NOTE = {Available at \url{http://www.lri.fr/~filliatr/ocamlweb/}}, + URL = {http://www.lri.fr/~filliatr/ocamlweb/} +} + +@ARTICLE{Filliatre00a, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Verification of Non-Functional Programs + using Interpretations in Type Theory}}, + JOURNAL = {Journal of Functional Programming}, + VOLUME = 13, + NUMBER = 4, + PAGES = {709--745}, + MONTH = {July}, + YEAR = 2003, + NOTE = {English translation of~\cite{Filliatre99}.}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/jphd.ps.gz}, + ABSTRACT = {We study the problem of certifying programs combining imperative and + functional features within the general framework of type theory. + + Type theory constitutes a powerful specification language, which is + naturally suited for the proof of purely functional programs. To + deal with imperative programs, we propose a logical interpretation + of an annotated program as a partial proof of its specification. The + construction of the corresponding partial proof term is based on a + static analysis of the effects of the program, and on the use of + monads. The usual notion of monads is refined in order to account + for the notion of effect. The missing subterms in the partial proof + term are seen as proof obligations, whose actual proofs are left to + the user. We show that the validity of those proof obligations + implies the total correctness of the program. + We also establish a result of partial completeness. + + This work has been implemented in the Coq proof assistant. + It appears as a tactic taking an annotated program as argument and + generating a set of proof obligations. Several nontrivial + algorithms have been certified using this tactic.} +} + +@ARTICLE{Filliatre99c, + AUTHOR = {J.-C. 
Filli\^atre}, + TITLE = {{Formal Proof of a Program: Find}}, + JOURNAL = {Science of Computer Programming}, + YEAR = 2001, + NOTE = {To appear}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/find.ps.gz}, + ABSTRACT = {In 1971, C.~A.~R.~Hoare gave the proof of correctness and termination of a + rather complex algorithm, in a paper entitled \emph{Proof of a + program: Find}. It is a hand-made proof, where the + program is given together with its formal specification and where + each step is fully + justified by a mathematical reasoning. We present here a formal + proof of the same program in the system Coq, using the + recent tactic of the system developed to establishing the total + correctness of + imperative programs. We follow Hoare's paper as close as + possible, keeping the same program and the same specification. We + show that we get exactly the same proof obligations, which are + proved in a straightforward way, following the original paper. + We also explain how more informal reasonings of Hoare's proof are + formalized in the system Coq. + This demonstrates the adequacy of the system Coq in the + process of certifying imperative programs.} +} + +@TECHREPORT{Filliatre99b, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{A theory of monads parameterized by effects}}, + INSTITUTION = {{LRI, Universit\'e Paris Sud}}, + TYPE = {{Research Report}}, + NUMBER = {1367}, + MONTH = {November}, + YEAR = 1999, + URL = {http://www.lri.fr/~filliatr/ftp/publis/monads.ps.gz}, + ABSTRACT = {Monads were introduced in computer science to express the semantics + of programs with computational effects, while type and effect + inference was introduced to mark out those effects. + In this article, we propose a combination of the notions of effects + and monads, where the monadic operators are parameterized by effects. + We establish some relationships between those generalized monads and + the classical ones. 
+ Then we use a generalized monad to translate imperative programs + into purely functional ones. We establish the correctness of that + translation. This work has been put into practice in the Coq proof + assistant to establish the correctness of imperative programs.} +} + +@PHDTHESIS{Filliatre99, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Preuve de programmes imp\'eratifs en th\'eorie des types}}, + TYPE = {Th{\`e}se de Doctorat}, + SCHOOL = {Universit\'e Paris-Sud}, + YEAR = 1999, + MONTH = {July}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/these.ps.gz}, + ABSTRACT = {Nous étudions le problème de la certification de programmes mêlant + traits impératifs et fonctionnels dans le cadre de la théorie des + types. + + La théorie des types constitue un puissant langage de spécification, + naturellement adapté à la preuve de programmes purement + fonctionnels. Pour y certifier également des programmes impératifs, + nous commençons par exprimer leur sémantique de manière purement + fonctionnelle. Cette traduction repose sur une analyse statique des + effets de bord des programmes, et sur l'utilisation de la notion de + monade, notion que nous raffinons en l'associant à la notion d'effet + de manière générale. Nous montrons que cette traduction est + sémantiquement correcte. + + Puis, à partir d'un programme annoté, nous construisons une preuve + de sa spécification, traduite de manière fonctionnelle. Cette preuve + est bâtie sur la traduction fonctionnelle précédemment + introduite. Elle est presque toujours incomplète, les parties + manquantes étant autant d'obligations de preuve qui seront laissées + à la charge de l'utilisateur. Nous montrons que la validité de ces + obligations entraîne la correction totale du programme. + + Nous avons implanté notre travail dans l'assistant de preuve + Coq, avec lequel il est dès à présent distribué. 
Cette + implantation se présente sous la forme d'une tactique prenant en + argument un programme annoté et engendrant les obligations de + preuve. Plusieurs algorithmes non triviaux ont été certifiés à + l'aide de cet outil (Find, Quicksort, Heapsort, algorithme de + Knuth-Morris-Pratt).} +} + +@INPROCEEDINGS{FilliatreMagaud99, + AUTHOR = {J.-C. Filli\^atre and N. Magaud}, + TITLE = {{Certification of sorting algorithms in the system Coq}}, + BOOKTITLE = {Theorem Proving in Higher Order Logics: + Emerging Trends}, + YEAR = 1999, + ABSTRACT = {We present the formal proofs of total correctness of three sorting + algorithms in the system Coq, namely \textit{insertion sort}, + \textit{quicksort} and \textit{heapsort}. The implementations are + imperative programs working in-place on a given array. Those + developments demonstrate the usefulness of inductive types and higher-order + logic in the process of software certification. They also + show that the proof of rather complex algorithms may be done in a + small amount of time --- only a few days for each development --- + and without great difficulty.}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/Filliatre-Magaud.ps.gz} +} + +@INPROCEEDINGS{Filliatre98, + AUTHOR = {J.-C. Filli\^atre}, + TITLE = {{Proof of Imperative Programs in Type Theory}}, + BOOKTITLE = {International Workshop, TYPES '98, Kloster Irsee, Germany}, + PUBLISHER = {Springer-Verlag}, + VOLUME = 1657, + SERIES = {Lecture Notes in Computer Science}, + MONTH = MAR, + YEAR = {1998}, + ABSTRACT = {We present a new approach to certifying imperative programs, + in the context of Type Theory. + The key is a functional translation of imperative programs, which is + made possible by an analysis of their effects. + On sequential imperative programs, we get the same proof + obligations as those given by Floyd-Hoare logic, + but our approach also includes functional constructions. 
+ As a side-effect, we propose a way to eradicate the use of auxiliary + variables in specifications. + This work has been implemented in the Coq Proof Assistant and applied + on non-trivial examples.}, + URL = {http://www.lri.fr/~filliatr/ftp/publis/types98.ps.gz} +} + +@TECHREPORT{Filliatre97, + AUTHOR = {J.-C. Filli\^atre}, + INSTITUTION = {LIP - ENS Lyon}, + NUMBER = {97--04}, + TITLE = {{Finite Automata Theory in Coq: + A constructive proof of Kleene's theorem}}, + TYPE = {Research Report}, + MONTH = {February}, + YEAR = {1997}, + ABSTRACT = {We describe here a development in the system Coq + of a piece of Finite Automata Theory. The main result is the Kleene's + theorem, expressing that regular expressions and finite automata + define the same languages. From a constructive proof of this result, + we automatically obtain a functional program that compiles any + regular expression into a finite automata, which constitutes the main + part of the implementation of {\tt grep}-like programs. This + functional program is obtained by the automatic method of {\em + extraction} which removes the logical parts of the proof to keep only + its informative contents. Starting with an idea of what we would + have written in ML, we write the specification and do the proofs in + such a way that we obtain the expected program, which is therefore + efficient.}, + URL = {ftp://ftp.ens-lyon.fr/pub/LIP/Rapports/RR/RR97/RR97-04.ps.Z} +} + +@TECHREPORT{Filliatre95, + AUTHOR = {J.-C. Filli\^atre}, + INSTITUTION = {LIP - ENS Lyon}, + NUMBER = {96--25}, + TITLE = {{A decision procedure for Direct Predicate + Calculus: study and implementation in + the Coq system}}, + TYPE = {Research Report}, + MONTH = {February}, + YEAR = {1995}, + ABSTRACT = {The paper of J. Ketonen and R. 
Weyhrauch \emph{A + decidable fragment of Predicate Calculus} defines a decidable + fragment of first-order predicate logic - Direct Predicate Calculus + - as the subset which is provable in Gentzen sequent calculus + without the contraction rule, and gives an effective decision + procedure for it. This report is a detailed study of this + procedure. We extend the decidability to non-prenex formulas. We + prove that the intuitionnistic fragment is still decidable, with a + refinement of the same procedure. An intuitionnistic version has + been implemented in the Coq system using a translation into + natural deduction.}, + URL = {ftp://ftp.ens-lyon.fr/pub/LIP/Rapports/RR/RR96/RR96-25.ps.Z} +} + +@TECHREPORT{Filliatre94, + AUTHOR = {J.-C. Filli\^atre}, + MONTH = {Juillet}, + INSTITUTION = {Ecole Normale Sup\'erieure}, + TITLE = {{Une proc\'edure de d\'ecision pour le Calcul des Pr\'edicats Direct~: \'etude et impl\'ementation dans le syst\`eme Coq}}, + TYPE = {Rapport de {DEA}}, + YEAR = {1994}, + URL = {ftp://ftp.lri.fr/LRI/articles/filliatr/memoire.dvi.gz} +} + +@TECHREPORT{CourantFilliatre93, + AUTHOR = {J. Courant et J.-C. Filli\^atre}, + MONTH = {Septembre}, + INSTITUTION = {Ecole Normale Sup\'erieure}, + TITLE = {{Formalisation de la th\'eorie des langages + formels en Coq}}, + TYPE = {Rapport de ma\^{\i}trise}, + YEAR = {1993}, + URL = {http://www.ens-lyon.fr/~jcourant/stage_maitrise.dvi.gz}, + URL2 = {http://www.ens-lyon.fr/~jcourant/stage_maitrise.ps.gz} +} + +@INPROCEEDINGS{tphols2000-Letouzey, + crossref = "tphols2000", + title = "Formalizing {S}t{\aa}lmarck's algorithm in {C}oq", + author = "Pierre Letouzey and Laurent Th{\'e}ry", + pages = "387--404"} + +@PROCEEDINGS{tphols2000, + editor = "J. Harrison and M. 
Aagaard", + booktitle = "Theorem Proving in Higher Order Logics: + 13th International Conference, TPHOLs 2000", + series = "Lecture Notes in Computer Science", + volume = 1869, + year = 2000, + publisher = "Springer-Verlag"} + +@InCollection{howe, + author = {Doug Howe}, + title = {Computation Meta theory in Nuprl}, + booktitle = {The Proceedings of the Ninth International Conference of Automated Deduction}, + volume = {310}, + editor = {E. Lusk and R. Overbeek}, + publisher = {Springer-Verlag}, + pages = {238--257}, + year = {1988} +} + +@TechReport{harrison, + author = {John Harrison}, + title = {Meta theory and Reflection in Theorem Proving: a Survey and Critique}, + institution = {SRI International Cambridge Computer Science Research Center}, + year = {1995}, + number = {CRC-053} +} + +@InCollection{cc, + author = {Thierry Coquand and Gérard Huet}, + title = {The Calculus of Constructions}, + booktitle = {Information and Computation}, + year = {1988}, + volume = {76}, + number = {2/3} +} + + +@InProceedings{coquandcci, + author = {Thierry Coquand and Christine Paulin-Mohring}, + title = {Inductively defined types}, + booktitle = {Proceedings of Colog'88}, + year = {1990}, + editor = {P. Martin-Löf and G. Mints}, + volume = {417}, + series = {LNCS}, + publisher = {Springer-Verlag} +} + + +@InProceedings{boutin, + author = {Samuel Boutin}, + title = {Using reflection to build efficient and certified decision procedures.}, + booktitle = {Proceedings of TACS'97}, + year = {1997}, + editor = {M. Abadi and T. 
Ito}, + volume = {1281}, + series = {LNCS}, + publisher = {Springer-Verlag} +} + +@Manual{Coq:manual, + title = {The Coq proof assistant reference manual}, + author = {\mbox{The Coq development team}}, + organization = {LogiCal Project}, + note = {Version 8.0}, + year = {2004}, + url = "http://coq.inria.fr" +} + +@string{jfp = "Journal of Functional Programming"} +@STRING{lncs="Lecture Notes in Computer Science"} +@STRING{lnai="Lecture Notes in Artificial Intelligence"} +@string{SV = "{Sprin\-ger-Verlag}"} + +@INPROCEEDINGS{Aud91, + AUTHOR = {Ph. Audebaud}, + BOOKTITLE = {Proceedings of the sixth Conf. on Logic in Computer Science.}, + PUBLISHER = {IEEE}, + TITLE = {Partial {Objects} in the {Calculus of Constructions}}, + YEAR = {1991} +} + +@PHDTHESIS{Aud92, + AUTHOR = {Ph. Audebaud}, + SCHOOL = {{Universit\'e} Bordeaux I}, + TITLE = {Extension du Calcul des Constructions par Points fixes}, + YEAR = {1992} +} + +@INPROCEEDINGS{Audebaud92b, + AUTHOR = {Ph. Audebaud}, + BOOKTITLE = {{Proceedings of the 1992 Workshop on Types for Proofs and Programs}}, + EDITOR = {{B. Nordstr\"om and K. Petersson and G. Plotkin}}, + NOTE = {Also Research Report LIP-ENS-Lyon}, + PAGES = {pp 21--34}, + TITLE = {{CC+ : an extension of the Calculus of Constructions with fixpoints}}, + YEAR = {1992} +} + +@INPROCEEDINGS{Augustsson85, + AUTHOR = {L. Augustsson}, + TITLE = {{Compiling Pattern Matching}}, + BOOKTITLE = {Conference Functional Programming and +Computer Architecture}, + YEAR = {1985} +} + +@ARTICLE{BaCo85, + AUTHOR = {J.L. Bates and R.L. Constable}, + JOURNAL = {ACM transactions on Programming Languages and Systems}, + TITLE = {Proofs as {Programs}}, + VOLUME = {7}, + YEAR = {1985} +} + +@BOOK{Bar81, + AUTHOR = {H.P. Barendregt}, + PUBLISHER = {North-Holland}, + TITLE = {The Lambda Calculus its Syntax and Semantics}, + YEAR = {1981} +} + +@TECHREPORT{Bar91, + AUTHOR = {H. 
Barendregt}, + INSTITUTION = {Catholic University Nijmegen}, + NOTE = {In Handbook of Logic in Computer Science, Vol II}, + NUMBER = {91-19}, + TITLE = {Lambda {Calculi with Types}}, + YEAR = {1991} +} + +@ARTICLE{BeKe92, + AUTHOR = {G. Bellin and J. Ketonen}, + JOURNAL = {Theoretical Computer Science}, + PAGES = {115--142}, + TITLE = {A decision procedure revisited : Notes on direct logic, linear logic and its implementation}, + VOLUME = {95}, + YEAR = {1992} +} + +@BOOK{Bee85, + AUTHOR = {M.J. Beeson}, + PUBLISHER = SV, + TITLE = {Foundations of Constructive Mathematics, Metamathematical Studies}, + YEAR = {1985} +} + +@BOOK{Bis67, + AUTHOR = {E. Bishop}, + PUBLISHER = {McGraw-Hill}, + TITLE = {Foundations of Constructive Analysis}, + YEAR = {1967} +} + +@BOOK{BoMo79, + AUTHOR = {R.S. Boyer and J.S. Moore}, + KEY = {BoMo79}, + PUBLISHER = {Academic Press}, + SERIES = {ACM Monograph}, + TITLE = {A computational logic}, + YEAR = {1979} +} + +@MASTERSTHESIS{Bou92, + AUTHOR = {S. Boutin}, + MONTH = sep, + SCHOOL = {{Universit\'e Paris 7}}, + TITLE = {Certification d'un compilateur {ML en Coq}}, + YEAR = {1992} +} + +@inproceedings{Bou97, + title = {Using reflection to build efficient and certified decision procedures}, + author = {S. Boutin}, + booktitle = {TACS'97}, + editor = {Martin Abadi and Takayasu Ito}, + publisher = SV, + series = lncs, + volume=1281, + PS={http://pauillac.inria.fr/~boutin/public_w/submitTACS97.ps.gz}, + year = {1997} +} + +@PhdThesis{Bou97These, + author = {S. Boutin}, + title = {R\'eflexions sur les quotients}, + school = {Paris 7}, + year = 1997, + type = {th\`ese d'Universit\'e}, + month = apr +} + +@ARTICLE{Bru72, + AUTHOR = {N.J. de Bruijn}, + JOURNAL = {Indag. Math.}, + TITLE = {{Lambda-Calculus Notation with Nameless Dummies, a Tool for Automatic Formula Manipulation, with Application to the Church-Rosser Theorem}}, + VOLUME = {34}, + YEAR = {1972} +} + + +@INCOLLECTION{Bru80, + AUTHOR = {N.J. de Bruijn}, + BOOKTITLE = {to H.B. 
Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, + EDITOR = {J.P. Seldin and J.R. Hindley}, + PUBLISHER = {Academic Press}, + TITLE = {A survey of the project {Automath}}, + YEAR = {1980} +} + +@TECHREPORT{COQ93, + AUTHOR = {G. Dowek and A. Felty and H. Herbelin and G. Huet and C. Murthy and C. Parent and C. Paulin-Mohring and B. Werner}, + INSTITUTION = {INRIA}, + MONTH = may, + NUMBER = {154}, + TITLE = {{The Coq Proof Assistant User's Guide Version 5.8}}, + YEAR = {1993} +} + +@TECHREPORT{CPar93, + AUTHOR = {C. Parent}, + INSTITUTION = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, + MONTH = oct, + NOTE = {Also in~\cite{Nijmegen93}}, + NUMBER = {93-29}, + TITLE = {Developing certified programs in the system {Coq}- {The} {Program} tactic}, + YEAR = {1993} +} + +@PHDTHESIS{CPar95, + AUTHOR = {C. Parent}, + SCHOOL = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, + TITLE = {{Synth\`ese de preuves de programmes dans le Calcul des Constructions Inductives}}, + YEAR = {1995} +} + +@BOOK{Caml, + AUTHOR = {P. Weis and X. Leroy}, + PUBLISHER = {InterEditions}, + TITLE = {Le langage Caml}, + YEAR = {1993} +} + +@INPROCEEDINGS{ChiPotSimp03, + AUTHOR = {Laurent Chicli and Lo\"{\i}c Pottier and Carlos Simpson}, + ADDRESS = {Berg en Dal, The Netherlands}, + TITLE = {Mathematical Quotients and Quotient Types in Coq}, + BOOKTITLE = {TYPES'02}, + PUBLISHER = SV, + SERIES = LNCS, + VOLUME = {2646}, + YEAR = {2003} +} + +@TECHREPORT{CoC89, + AUTHOR = {Projet Formel}, + INSTITUTION = {INRIA}, + NUMBER = {110}, + TITLE = {{The Calculus of Constructions. 
Documentation and user's guide, Version 4.10}}, + YEAR = {1989} +} + +@INPROCEEDINGS{CoHu85a, + AUTHOR = {Thierry Coquand and Gérard Huet}, + ADDRESS = {Linz}, + BOOKTITLE = {EUROCAL'85}, + PUBLISHER = SV, + SERIES = LNCS, + TITLE = {{Constructions : A Higher Order Proof System for Mechanizing Mathematics}}, + VOLUME = {203}, + YEAR = {1985} +} + +@INPROCEEDINGS{CoHu85b, + AUTHOR = {Thierry Coquand and Gérard Huet}, + BOOKTITLE = {Logic Colloquium'85}, + EDITOR = {The Paris Logic Group}, + PUBLISHER = {North-Holland}, + TITLE = {{Concepts Math\'ematiques et Informatiques formalis\'es dans le Calcul des Constructions}}, + YEAR = {1987} +} + +@ARTICLE{CoHu86, + AUTHOR = {Thierry Coquand and Gérard Huet}, + JOURNAL = {Information and Computation}, + NUMBER = {2/3}, + TITLE = {The {Calculus of Constructions}}, + VOLUME = {76}, + YEAR = {1988} +} + +@INPROCEEDINGS{CoPa89, + AUTHOR = {Thierry Coquand and Christine Paulin-Mohring}, + BOOKTITLE = {Proceedings of Colog'88}, + EDITOR = {P. Martin-L\"of and G. Mints}, + PUBLISHER = SV, + SERIES = LNCS, + TITLE = {Inductively defined types}, + VOLUME = {417}, + YEAR = {1990} +} + +@BOOK{Con86, + AUTHOR = {R.L. {Constable et al.}}, + PUBLISHER = {Prentice-Hall}, + TITLE = {{Implementing Mathematics with the Nuprl Proof Development System}}, + YEAR = {1986} +} + +@PHDTHESIS{Coq85, + AUTHOR = {Thierry Coquand}, + MONTH = jan, + SCHOOL = {Universit\'e Paris~7}, + TITLE = {Une Th\'eorie des Constructions}, + YEAR = {1985} +} + +@INPROCEEDINGS{Coq86, + AUTHOR = {Thierry Coquand}, + ADDRESS = {Cambridge, MA}, + BOOKTITLE = {Symposium on Logic in Computer Science}, + PUBLISHER = {IEEE Computer Society Press}, + TITLE = {{An Analysis of Girard's Paradox}}, + YEAR = {1986} +} + +@INPROCEEDINGS{Coq90, + AUTHOR = {Thierry Coquand}, + BOOKTITLE = {Logic and Computer Science}, + EDITOR = {P. 
Odifreddi}, + NOTE = {INRIA Research Report 1088, also in~\cite{CoC89}}, + PUBLISHER = {Academic Press}, + TITLE = {{Metamathematical Investigations of a Calculus of Constructions}}, + YEAR = {1990} +} + +@INPROCEEDINGS{Coq91, + AUTHOR = {Thierry Coquand}, + BOOKTITLE = {Proceedings 9th Int. Congress of Logic, Methodology and Philosophy of Science}, + TITLE = {{A New Paradox in Type Theory}}, + MONTH = {August}, + YEAR = {1991} +} + +@INPROCEEDINGS{Coq92, + AUTHOR = {Thierry Coquand}, + TITLE = {{Pattern Matching with Dependent Types}}, + YEAR = {1992}, + crossref = {Bastad92} +} + +@INPROCEEDINGS{Coquand93, + AUTHOR = {Thierry Coquand}, + TITLE = {{Infinite Objects in Type Theory}}, + YEAR = {1993}, + crossref = {Nijmegen93} +} + +@MASTERSTHESIS{Cou94a, + AUTHOR = {J. Courant}, + MONTH = sep, + SCHOOL = {DEA d'Informatique, ENS Lyon}, + TITLE = {Explicitation de preuves par r\'ecurrence implicite}, + YEAR = {1994} +} + +@INPROCEEDINGS{Del99, + author = "Delahaye, D.", + title = "Information Retrieval in a Coq Proof Library using + Type Isomorphisms", + booktitle = {Proceedings of TYPES'99, L\"okeberg}, + publisher = SV, + series = lncs, + year = "1999", + url = + "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# + "{\sf TYPES99-SIsos.ps.gz}" +} + +@INPROCEEDINGS{Del00, + author = "Delahaye, D.", + title = "A {T}actic {L}anguage for the {S}ystem {{\sf Coq}}", + booktitle = "Proceedings of Logic for Programming and Automated Reasoning + (LPAR), Reunion Island", + publisher = SV, + series = LNCS, + volume = "1955", + pages = "85--95", + month = "November", + year = "2000", + url = + "{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# + "{\sf LPAR2000-ltac.ps.gz}" +} + +@INPROCEEDINGS{DelMay01, + author = "Delahaye, D. 
and Mayero, M.", + title = {{\tt Field}: une proc\'edure de d\'ecision pour les nombres r\'eels + en {\Coq}}, + booktitle = "Journ\'ees Francophones des Langages Applicatifs, Pontarlier", + publisher = "INRIA", + month = "Janvier", + year = "2001", + url = + "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# + "{\sf JFLA2000-Field.ps.gz}" +} + +@TECHREPORT{Dow90, + AUTHOR = {G. Dowek}, + INSTITUTION = {INRIA}, + NUMBER = {1283}, + TITLE = {Naming and Scoping in a Mathematical Vernacular}, + TYPE = {Research Report}, + YEAR = {1990} +} + +@ARTICLE{Dow91a, + AUTHOR = {G. Dowek}, + JOURNAL = {Compte-Rendus de l'Acad\'emie des Sciences}, + NOTE = {The undecidability of Third Order Pattern Matching in Calculi with Dependent Types or Type Constructors}, + NUMBER = {12}, + PAGES = {951--956}, + TITLE = {L'Ind\'ecidabilit\'e du Filtrage du Troisi\`eme Ordre dans les Calculs avec Types D\'ependants ou Constructeurs de Types}, + VOLUME = {I, 312}, + YEAR = {1991} +} + +@INPROCEEDINGS{Dow91b, + AUTHOR = {G. Dowek}, + BOOKTITLE = {Proceedings of Mathematical Foundation of Computer Science}, + NOTE = {Also INRIA Research Report}, + PAGES = {151--160}, + PUBLISHER = SV, + SERIES = LNCS, + TITLE = {A Second Order Pattern Matching Algorithm in the Cube of Typed $\lambda$-calculi}, + VOLUME = {520}, + YEAR = {1991} +} + +@PHDTHESIS{Dow91c, + AUTHOR = {G. Dowek}, + MONTH = dec, + SCHOOL = {Universit\'e Paris 7}, + TITLE = {D\'emonstration automatique dans le Calcul des Constructions}, + YEAR = {1991} +} + +@article{Dow92a, + AUTHOR = {G. Dowek}, + TITLE = {The Undecidability of Pattern Matching in Calculi where Primitive Recursive Functions are Representable}, + YEAR = 1993, + journal = tcs, + volume = 107, + number = 2, + pages = {349-356} +} + + +@ARTICLE{Dow94a, + AUTHOR = {G. 
Dowek}, + JOURNAL = {Annals of Pure and Applied Logic}, + VOLUME = {69}, + PAGES = {135--155}, + TITLE = {Third order matching is decidable}, + YEAR = {1994} +} + +@INPROCEEDINGS{Dow94b, + AUTHOR = {G. Dowek}, + BOOKTITLE = {Proceedings of the second international conference on typed lambda calculus and applications}, + TITLE = {Lambda-calculus, Combinators and the Comprehension Schema}, + YEAR = {1995} +} + +@INPROCEEDINGS{Dyb91, + AUTHOR = {P. Dybjer}, + BOOKTITLE = {Logical Frameworks}, + EDITOR = {G. Huet and G. Plotkin}, + PAGES = {59--79}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Inductive sets and families in {Martin-L{\"o}f's} + Type Theory and their set-theoretic semantics: An inversion principle for {Martin-L\"of's} type theory}, + VOLUME = {14}, + YEAR = {1991} +} + +@ARTICLE{Dyc92, + AUTHOR = {Roy Dyckhoff}, + JOURNAL = {The Journal of Symbolic Logic}, + MONTH = sep, + NUMBER = {3}, + TITLE = {Contraction-free sequent calculi for intuitionistic logic}, + VOLUME = {57}, + YEAR = {1992} +} + +@MASTERSTHESIS{Fil94, + AUTHOR = {J.-C. Filli\^atre}, + MONTH = sep, + SCHOOL = {DEA d'Informatique, ENS Lyon}, + TITLE = {Une proc\'edure de d\'ecision pour le Calcul des Pr\'edicats Direct. {\'E}tude et impl\'ementation dans le syst\`eme {\Coq}}, + YEAR = {1994} +} + +@TECHREPORT{Filliatre95, + AUTHOR = {J.-C. Filli\^atre}, + INSTITUTION = {LIP-ENS-Lyon}, + TITLE = {A decision procedure for Direct Predicate Calculus}, + TYPE = {Research report}, + NUMBER = {96--25}, + YEAR = {1995} +} + +@Article{Filliatre03jfp, + author = {J.-C. Filli{\^a}tre}, + title = {Verification of Non-Functional Programs + using Interpretations in Type Theory}, + journal = jfp, + volume = 13, + number = 4, + pages = {709--745}, + month = jul, + year = 2003, + note = {[English translation of \cite{Filliatre99}]}, + url = {http://www.lri.fr/~filliatr/ftp/publis/jphd.ps.gz}, + topics = "team, lri", + type_publi = "irevcomlec" +} + + +@PhdThesis{Filliatre99, + author = {J.-C. 
Filli\^atre}, + title = {Preuve de programmes imp\'eratifs en th\'eorie des types}, + type = {Th{\`e}se de Doctorat}, + school = {Universit\'e Paris-Sud}, + year = 1999, + month = {July}, + url = {\url{http://www.lri.fr/~filliatr/ftp/publis/these.ps.gz}} +} + +@Unpublished{Filliatre99c, + author = {J.-C. Filli\^atre}, + title = {{Formal Proof of a Program: Find}}, + month = {January}, + year = 2000, + note = {Submitted to \emph{Science of Computer Programming}}, + url = {\url{http://www.lri.fr/~filliatr/ftp/publis/find.ps.gz}} +} + +@InProceedings{FilliatreMagaud99, + author = {J.-C. Filli\^atre and N. Magaud}, + title = {Certification of sorting algorithms in the system {\Coq}}, + booktitle = {Theorem Proving in Higher Order Logics: + Emerging Trends}, + year = 1999, + url = {\url{http://www.lri.fr/~filliatr/ftp/publis/Filliatre-Magaud.ps.gz}} +} + +@UNPUBLISHED{Fle90, + AUTHOR = {E. Fleury}, + MONTH = jul, + NOTE = {Rapport de Stage}, + TITLE = {Implantation des algorithmes de {Floyd et de Dijkstra} dans le {Calcul des Constructions}}, + YEAR = {1990} +} + +@BOOK{Fourier, + AUTHOR = {Jean-Baptiste-Joseph Fourier}, + PUBLISHER = {Gauthier-Villars}, + TITLE = {Fourier's method to solve linear + inequations/equations systems.}, + YEAR = {1890} +} + +@INPROCEEDINGS{Gim94, + AUTHOR = {Eduardo Gim\'enez}, + BOOKTITLE = {Types'94 : Types for Proofs and Programs}, + NOTE = {Extended version in LIP research report 95-07, ENS Lyon}, + PUBLISHER = SV, + SERIES = LNCS, + TITLE = {Codifying guarded definitions with recursive schemes}, + VOLUME = {996}, + YEAR = {1994} +} + +@TechReport{Gim98, + author = {E. Gim\'enez}, + title = {A Tutorial on Recursive Types in Coq}, + institution = {INRIA}, + year = 1998, + month = mar +} + +@INPROCEEDINGS{Gimenez95b, + AUTHOR = {E. 
Gim\'enez}, + BOOKTITLE = {Workshop on Types for Proofs and Programs}, + SERIES = LNCS, + NUMBER = {1158}, + PAGES = {135-152}, + TITLE = {An application of co-Inductive types in Coq: + verification of the Alternating Bit Protocol}, + EDITOR = {S. Berardi and M. Coppo}, + PUBLISHER = SV, + YEAR = {1995} +} + +@INPROCEEDINGS{Gir70, + AUTHOR = {Jean-Yves Girard}, + BOOKTITLE = {Proceedings of the 2nd Scandinavian Logic Symposium}, + PUBLISHER = {North-Holland}, + TITLE = {Une extension de l'interpr\'etation de {G\"odel} \`a l'analyse, et son application \`a l'\'elimination des coupures dans l'analyse et la th\'eorie des types}, + YEAR = {1970} +} + +@PHDTHESIS{Gir72, + AUTHOR = {Jean-Yves Girard}, + SCHOOL = {Universit\'e Paris~7}, + TITLE = {Interpr\'etation fonctionnelle et \'elimination des coupures de l'arithm\'etique d'ordre sup\'erieur}, + YEAR = {1972} +} + + + +@BOOK{Gir89, + AUTHOR = {Jean-Yves Girard and Yves Lafont and Paul Taylor}, + PUBLISHER = {Cambridge University Press}, + SERIES = {Cambridge Tracts in Theoretical Computer Science 7}, + TITLE = {Proofs and Types}, + YEAR = {1989} +} + +@TechReport{Har95, + author = {John Harrison}, + title = {Metatheory and Reflection in Theorem Proving: A Survey and Critique}, + institution = {SRI International Cambridge Computer Science Research Centre,}, + year = 1995, + type = {Technical Report}, + number = {CRC-053}, + abstract = {http://www.cl.cam.ac.uk/users/jrh/papers.html} +} + +@MASTERSTHESIS{Hir94, + AUTHOR = {Daniel Hirschkoff}, + MONTH = sep, + SCHOOL = {DEA IARFA, Ecole des Ponts et Chauss\'ees, Paris}, + TITLE = {{\'E}criture d'une tactique arithm\'etique pour le syst\`eme {\Coq}}, + YEAR = {1994} +} + +@INPROCEEDINGS{HofStr98, + AUTHOR = {Martin Hofmann and Thomas Streicher}, + TITLE = {The groupoid interpretation of type theory}, + BOOKTITLE = {Proceedings of the meeting Twenty-five years of constructive type theory}, + PUBLISHER = {Oxford University Press}, + YEAR = {1998} +} + +@INCOLLECTION{How80, 
+ AUTHOR = {W.A. Howard}, + BOOKTITLE = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, + EDITOR = {J.P. Seldin and J.R. Hindley}, + NOTE = {Unpublished 1969 Manuscript}, + PUBLISHER = {Academic Press}, + TITLE = {The Formulae-as-Types Notion of Constructions}, + YEAR = {1980} +} + + + +@InProceedings{Hue87tapsoft, + author = {G. Huet}, + title = {Programming of Future Generation Computers}, + booktitle = {Proceedings of TAPSOFT87}, + series = LNCS, + volume = 249, + pages = {276--286}, + year = 1987, + publisher = SV +} + +@INPROCEEDINGS{Hue87, + AUTHOR = {G. Huet}, + BOOKTITLE = {Programming of Future Generation Computers}, + EDITOR = {K. Fuchi and M. Nivat}, + NOTE = {Also in \cite{Hue87tapsoft}}, + PUBLISHER = {Elsevier Science}, + TITLE = {Induction Principles Formalized in the {Calculus of Constructions}}, + YEAR = {1988} +} + + + +@INPROCEEDINGS{Hue88, + AUTHOR = {G. Huet}, + BOOKTITLE = {A perspective in Theoretical Computer Science. Commemorative Volume for Gift Siromoney}, + EDITOR = {R. Narasimhan}, + NOTE = {Also in~\cite{CoC89}}, + PUBLISHER = {World Scientific Publishing}, + TITLE = {{The Constructive Engine}}, + YEAR = {1989} +} + +@BOOK{Hue89, + EDITOR = {G. Huet}, + PUBLISHER = {Addison-Wesley}, + SERIES = {The UT Year of Programming Series}, + TITLE = {Logical Foundations of Functional Programming}, + YEAR = {1989} +} + +@INPROCEEDINGS{Hue92, + AUTHOR = {G. Huet}, + BOOKTITLE = {Proceedings of 12th FST/TCS Conference, New Delhi}, + PAGES = {229--240}, + PUBLISHER = SV, + SERIES = LNCS, + TITLE = {The Gallina Specification Language : A case study}, + VOLUME = {652}, + YEAR = {1992} +} + +@ARTICLE{Hue94, + AUTHOR = {G. Huet}, + JOURNAL = {J. Functional Programming}, + PAGES = {371--394}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Residual theory in $\lambda$-calculus: a formal development}, + VOLUME = {4,3}, + YEAR = {1994} +} + +@INCOLLECTION{HuetLevy79, + AUTHOR = {G. Huet and J.-J. 
L\'{e}vy}, + TITLE = {Call by Need Computations in Non-Ambigous +Linear Term Rewriting Systems}, + NOTE = {Also research report 359, INRIA, 1979}, + BOOKTITLE = {Computational Logic, Essays in Honor of +Alan Robinson}, + EDITOR = {J.-L. Lassez and G. Plotkin}, + PUBLISHER = {The MIT press}, + YEAR = {1991} +} + +@ARTICLE{KeWe84, + AUTHOR = {J. Ketonen and R. Weyhrauch}, + JOURNAL = {Theoretical Computer Science}, + PAGES = {297--307}, + TITLE = {A decidable fragment of {P}redicate {C}alculus}, + VOLUME = {32}, + YEAR = {1984} +} + +@BOOK{Kle52, + AUTHOR = {S.C. Kleene}, + PUBLISHER = {North-Holland}, + SERIES = {Bibliotheca Mathematica}, + TITLE = {Introduction to Metamathematics}, + YEAR = {1952} +} + +@BOOK{Kri90, + AUTHOR = {J.-L. Krivine}, + PUBLISHER = {Masson}, + SERIES = {Etudes et recherche en informatique}, + TITLE = {Lambda-calcul {types et mod\`eles}}, + YEAR = {1990} +} + +@BOOK{LE92, + EDITOR = {G. Huet and G. Plotkin}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Logical Environments}, + YEAR = {1992} +} + +@BOOK{LF91, + EDITOR = {G. Huet and G. Plotkin}, + PUBLISHER = {Cambridge University Press}, + TITLE = {Logical Frameworks}, + YEAR = {1991} +} + +@ARTICLE{Laville91, + AUTHOR = {A. Laville}, + TITLE = {Comparison of Priority Rules in Pattern +Matching and Term Rewriting}, + JOURNAL = {Journal of Symbolic Computation}, + VOLUME = {11}, + PAGES = {321--347}, + YEAR = {1991} +} + +@INPROCEEDINGS{LePa94, + AUTHOR = {F. Leclerc and C. Paulin-Mohring}, + BOOKTITLE = {{Types for Proofs and Programs, Types' 93}}, + EDITOR = {H. Barendregt and T. Nipkow}, + PUBLISHER = SV, + SERIES = {LNCS}, + TITLE = {{Programming with Streams in Coq. A case study : The Sieve of Eratosthenes}}, + VOLUME = {806}, + YEAR = {1994} +} + +@TECHREPORT{Leroy90, + AUTHOR = {X. Leroy}, + TITLE = {The {ZINC} experiment: an economical implementation +of the {ML} language}, + INSTITUTION = {INRIA}, + NUMBER = {117}, + YEAR = {1990} +} + +@INPROCEEDINGS{Let02, + author = {P. 
Letouzey}, + title = {A New Extraction for Coq}, + booktitle = {Proceedings of the TYPES'2002 workshop}, + year = 2002, + note = {to appear}, + url = {draft at \url{http://www.lri.fr/~letouzey/download/extraction2002.ps.gz}} +} + +@BOOK{MaL84, + AUTHOR = {{P. Martin-L\"of}}, + PUBLISHER = {Bibliopolis}, + SERIES = {Studies in Proof Theory}, + TITLE = {Intuitionistic Type Theory}, + YEAR = {1984} +} + +@ARTICLE{MaSi94, + AUTHOR = {P. Manoury and M. Simonot}, + JOURNAL = {TCS}, + TITLE = {Automatizing termination proof of recursively defined function}, + YEAR = {To appear} +} + +@INPROCEEDINGS{Moh89a, + AUTHOR = {Christine Paulin-Mohring}, + ADDRESS = {Austin}, + BOOKTITLE = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, + MONTH = jan, + PUBLISHER = {ACM}, + TITLE = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, + YEAR = {1989} +} + +@PHDTHESIS{Moh89b, + AUTHOR = {Christine Paulin-Mohring}, + MONTH = jan, + SCHOOL = {{Universit\'e Paris 7}}, + TITLE = {Extraction de programmes dans le {Calcul des Constructions}}, + YEAR = {1989} +} + +@INPROCEEDINGS{Moh93, + AUTHOR = {Christine Paulin-Mohring}, + BOOKTITLE = {Proceedings of the conference Typed Lambda Calculi and Applications}, + EDITOR = {M. Bezem and J.-F. Groote}, + NOTE = {Also LIP research report 92-49, ENS Lyon}, + NUMBER = {664}, + PUBLISHER = SV, + SERIES = {LNCS}, + TITLE = {{Inductive Definitions in the System Coq - Rules and Properties}}, + YEAR = {1993} +} + +@BOOK{Moh97, + AUTHOR = {Christine Paulin-Mohring}, + MONTH = jan, + PUBLISHER = {{ENS Lyon}}, + TITLE = {{Le syst\`eme Coq. \mbox{Th\`ese d'habilitation}}}, + YEAR = {1997} +} + +@MASTERSTHESIS{Mun94, + AUTHOR = {C. Mu{\~n}oz}, + MONTH = sep, + SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, + TITLE = {D\'emonstration automatique dans la logique propositionnelle intuitionniste}, + YEAR = {1994} +} + +@PHDTHESIS{Mun97d, + AUTHOR = "C. 
Mu{\~{n}}oz", + TITLE = "Un calcul de substitutions pour la repr\'esentation + de preuves partielles en th\'eorie de types", + SCHOOL = {Universit\'e Paris 7}, + YEAR = "1997", + Note = {Version en anglais disponible comme rapport de + recherche INRIA RR-3309}, + Type = {Th\`ese de Doctorat} +} + +@BOOK{NoPS90, + AUTHOR = {B. {Nordstr\"om} and K. Peterson and J. Smith}, + BOOKTITLE = {Information Processing 83}, + PUBLISHER = {Oxford Science Publications}, + SERIES = {International Series of Monographs on Computer Science}, + TITLE = {Programming in {Martin-L\"of's} Type Theory}, + YEAR = {1990} +} + +@ARTICLE{Nor88, + AUTHOR = {B. {Nordstr\"om}}, + JOURNAL = {BIT}, + TITLE = {Terminating General Recursion}, + VOLUME = {28}, + YEAR = {1988} +} + +@BOOK{Odi90, + EDITOR = {P. Odifreddi}, + PUBLISHER = {Academic Press}, + TITLE = {Logic and Computer Science}, + YEAR = {1990} +} + +@INPROCEEDINGS{PaMS92, + AUTHOR = {M. Parigot and P. Manoury and M. Simonot}, + ADDRESS = {St. Petersburg, Russia}, + BOOKTITLE = {Logic Programming and automated reasoning}, + EDITOR = {A. Voronkov}, + MONTH = jul, + NUMBER = {624}, + PUBLISHER = SV, + SERIES = {LNCS}, + TITLE = {{ProPre : A Programming language with proofs}}, + YEAR = {1992} +} + +@ARTICLE{PaWe92, + AUTHOR = {Christine Paulin-Mohring and Benjamin Werner}, + JOURNAL = {Journal of Symbolic Computation}, + PAGES = {607--640}, + TITLE = {{Synthesis of ML programs in the system Coq}}, + VOLUME = {15}, + YEAR = {1993} +} + +@ARTICLE{Par92, + AUTHOR = {M. Parigot}, + JOURNAL = {Theoretical Computer Science}, + NUMBER = {2}, + PAGES = {335--356}, + TITLE = {{Recursive Programming with Proofs}}, + VOLUME = {94}, + YEAR = {1992} +} + +@INPROCEEDINGS{Parent95b, + AUTHOR = {C. 
Parent}, + BOOKTITLE = {{Mathematics of Program Construction'95}}, + PUBLISHER = SV, + SERIES = {LNCS}, + TITLE = {{Synthesizing proofs from programs in +the Calculus of Inductive Constructions}}, + VOLUME = {947}, + YEAR = {1995} +} + +@INPROCEEDINGS{Prasad93, + AUTHOR = {K.V. Prasad}, + BOOKTITLE = {{Proceedings of CONCUR'93}}, + PUBLISHER = SV, + SERIES = {LNCS}, + TITLE = {{Programming with broadcasts}}, + VOLUME = {715}, + YEAR = {1993} +} + +@BOOK{RC95, + author = "di~Cosmo, R.", + title = "Isomorphisms of Types: from $\lambda$-calculus to information + retrieval and language design", + series = "Progress in Theoretical Computer Science", + publisher = "Birkhauser", + year = "1995", + note = "ISBN-0-8176-3763-X" +} + +@TECHREPORT{Rou92, + AUTHOR = {J. Rouyer}, + INSTITUTION = {INRIA}, + MONTH = nov, + NUMBER = {1795}, + TITLE = {{D{\'e}veloppement de l'Algorithme d'Unification dans le Calcul des Constructions}}, + YEAR = {1992} +} + +@TECHREPORT{Saibi94, + AUTHOR = {A. Sa\"{\i}bi}, + INSTITUTION = {INRIA}, + MONTH = dec, + NUMBER = {2345}, + TITLE = {{Axiomatization of a lambda-calculus with explicit-substitutions in the Coq System}}, + YEAR = {1994} +} + + +@MASTERSTHESIS{Ter92, + AUTHOR = {D. Terrasse}, + MONTH = sep, + SCHOOL = {IARFA}, + TITLE = {{Traduction de TYPOL en COQ. Application \`a Mini ML}}, + YEAR = {1992} +} + +@TECHREPORT{ThBeKa92, + AUTHOR = {L. Th\'ery and Y. Bertot and G. Kahn}, + INSTITUTION = {INRIA Sophia}, + MONTH = may, + NUMBER = {1684}, + TITLE = {Real theorem provers deserve real user-interfaces}, + TYPE = {Research Report}, + YEAR = {1992} +} + +@BOOK{TrDa89, + AUTHOR = {A.S. Troelstra and D. van Dalen}, + PUBLISHER = {North-Holland}, + SERIES = {Studies in Logic and the foundations of Mathematics, volumes 121 and 123}, + TITLE = {Constructivism in Mathematics, an introduction}, + YEAR = {1988} +} + +@PHDTHESIS{Wer94, + AUTHOR = {B. 
Werner}, + SCHOOL = {Universit\'e Paris 7}, + TITLE = {Une th\'eorie des constructions inductives}, + TYPE = {Th\`ese de Doctorat}, + YEAR = {1994} +} + +@PHDTHESIS{Bar99, + AUTHOR = {B. Barras}, + SCHOOL = {Universit\'e Paris 7}, + TITLE = {Auto-validation d'un système de preuves avec familles inductives}, + TYPE = {Th\`ese de Doctorat}, + YEAR = {1999} +} + +@UNPUBLISHED{ddr98, + AUTHOR = {D. de Rauglaudre}, + TITLE = {Camlp4 version 1.07.2}, + YEAR = {1998}, + NOTE = {In Camlp4 distribution} +} + +@ARTICLE{dowek93, + AUTHOR = {G. Dowek}, + TITLE = {{A Complete Proof Synthesis Method for the Cube of Type Systems}}, + JOURNAL = {Journal Logic Computation}, + VOLUME = {3}, + NUMBER = {3}, + PAGES = {287--315}, + MONTH = {June}, + YEAR = {1993} +} + +@INPROCEEDINGS{manoury94, + AUTHOR = {P. Manoury}, + TITLE = {{A User's Friendly Syntax to Define +Recursive Functions as Typed $\lambda-$Terms}}, + BOOKTITLE = {{Types for Proofs and Programs, TYPES'94}}, + SERIES = {LNCS}, + VOLUME = {996}, + MONTH = jun, + YEAR = {1994} +} + +@TECHREPORT{maranget94, + AUTHOR = {L. Maranget}, + INSTITUTION = {INRIA}, + NUMBER = {2385}, + TITLE = {{Two Techniques for Compiling Lazy Pattern Matching}}, + YEAR = {1994} +} + +@INPROCEEDINGS{puel-suarez90, + AUTHOR = {L.Puel and A. Su\'arez}, + BOOKTITLE = {{Conference Lisp and Functional Programming}}, + SERIES = {ACM}, + PUBLISHER = SV, + TITLE = {{Compiling Pattern Matching by Term +Decomposition}}, + YEAR = {1990} +} + +@MASTERSTHESIS{saidi94, + AUTHOR = {H. Saidi}, + MONTH = sep, + SCHOOL = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, + TITLE = {R\'esolution d'\'equations dans le syst\`eme T + de G\"odel}, + YEAR = {1994} +} + +@misc{streicher93semantical, + author = "T. 
Streicher", + title = "Semantical Investigations into Intensional Type Theory", + note = "Habilitationsschrift, LMU Munchen.", + year = "1993" } + + + +@Misc{Pcoq, + author = {Lemme Team}, + title = {Pcoq a graphical user-interface for {Coq}}, + note = {\url{http://www-sop.inria.fr/lemme/pcoq/}} +} + + +@Misc{ProofGeneral, + author = {David Aspinall}, + title = {Proof General}, + note = {\url{http://proofgeneral.inf.ed.ac.uk/}} +} + + + +@Book{CoqArt, + author = {Yves Bertot and Pierre Castéran}, + title = {Coq'Art}, + publisher = {Springer-Verlag}, + year = 2004, + note = {To appear} +} + +@INCOLLECTION{wadler87, + AUTHOR = {P. Wadler}, + TITLE = {Efficient Compilation of Pattern Matching}, + BOOKTITLE = {The Implementation of Functional Programming +Languages}, + EDITOR = {S.L. Peyton Jones}, + PUBLISHER = {Prentice-Hall}, + YEAR = {1987} +} + + +@COMMENT{cross-references, must be at end} + +@BOOK{Bastad92, + EDITOR = {B. Nordstr\"om and K. Petersson and G. Plotkin}, + PUBLISHER = {Available by ftp at site ftp.inria.fr}, + TITLE = {Proceedings of the 1992 Workshop on Types for Proofs and Programs}, + YEAR = {1992} +} + +@BOOK{Nijmegen93, + EDITOR = {H. Barendregt and T. Nipkow}, + PUBLISHER = SV, + SERIES = LNCS, + TITLE = {Types for Proofs and Programs}, + VOLUME = {806}, + YEAR = {1994} +} + +@PHDTHESIS{Luo90, + AUTHOR = {Z. Luo}, + TITLE = {An Extended Calculus of Constructions}, + SCHOOL = {University of Edinburgh}, + YEAR = {1990} +} diff --git a/doc/faq/hevea.sty b/doc/faq/hevea.sty new file mode 100644 index 00000000..6d49aa8c --- /dev/null +++ b/doc/faq/hevea.sty @@ -0,0 +1,78 @@ +% hevea : hevea.sty +% This is a very basic style file for latex document to be processed +% with hevea. It contains definitions of LaTeX environment which are +% processed in a special way by the translator. +% Mostly : +% - latexonly, not processed by hevea, processed by latex. +% - htmlonly , the reverse. +% - rawhtml, to include raw HTML in hevea output. 
+% - toimage, to send text to the image file. +% The package also provides hevea logos, html related commands (ahref +% etc.), void cutting and image commands. +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{hevea}[2002/01/11] +\RequirePackage{comment} +\newif\ifhevea\heveafalse +\@ifundefined{ifimagen}{\newif\ifimagen\imagenfalse} +\makeatletter% +\newcommand{\heveasmup}[2]{% +\raise #1\hbox{$\m@th$% + \csname S@\f@size\endcsname + \fontsize\sf@size 0% + \math@fontsfalse\selectfont +#2% +}}% +\DeclareRobustCommand{\hevea}{H\kern-.15em\heveasmup{.2ex}{E}\kern-.15emV\kern-.15em\heveasmup{.2ex}{E}\kern-.15emA}% +\DeclareRobustCommand{\hacha}{H\kern-.15em\heveasmup{.2ex}{A}\kern-.15emC\kern-.1em\heveasmup{.2ex}{H}\kern-.15emA}% +\DeclareRobustCommand{\html}{\protect\heveasmup{0.ex}{HTML}} +%%%%%%%%% Hyperlinks hevea style +\newcommand{\ahref}[2]{{#2}} +\newcommand{\ahrefloc}[2]{{#2}} +\newcommand{\aname}[2]{{#2}} +\newcommand{\ahrefurl}[1]{\texttt{#1}} +\newcommand{\footahref}[2]{#2\footnote{\texttt{#1}}} +\newcommand{\mailto}[1]{\texttt{#1}} +\newcommand{\imgsrc}[2][]{} +\newcommand{\home}[1]{\protect\raisebox{-.75ex}{\char126}#1} +\AtBeginDocument +{\@ifundefined{url} +{%url package is not loaded +\let\url\ahref\let\oneurl\ahrefurl\let\footurl\footahref} +{}} +%% Void cutting instructions +\newcounter{cuttingdepth} +\newcommand{\tocnumber}{} +\newcommand{\notocnumber}{} +\newcommand{\cuttingunit}{} +\newcommand{\cutdef}[2][]{} +\newcommand{\cuthere}[2]{} +\newcommand{\cutend}{} +\newcommand{\htmlhead}[1]{} +\newcommand{\htmlfoot}[1]{} +\newcommand{\htmlprefix}[1]{} +\newenvironment{cutflow}[1]{}{} +\newcommand{\cutname}[1]{} +\newcommand{\toplinks}[3]{} +%%%% Html only +\excludecomment{rawhtml} +\newcommand{\rawhtmlinput}[1]{} +\excludecomment{htmlonly} +%%%% Latex only +\newenvironment{latexonly}{}{} +\newenvironment{verblatex}{}{} +%%%% Image file stuff +\def\toimage{\endgroup} +\def\endtoimage{\begingroup\def\@currenvir{toimage}} +\def\verbimage{\endgroup} 
+\def\endverbimage{\begingroup\def\@currenvir{verbimage}} +\newcommand{\imageflush}[1][]{} +%%% Bgcolor definition +\newsavebox{\@bgcolorbin} +\newenvironment{bgcolor}[2][] + {\newcommand{\@mycolor}{#2}\begin{lrbox}{\@bgcolorbin}\vbox\bgroup} + {\egroup\end{lrbox}% + \begin{flushleft}% + \colorbox{\@mycolor}{\usebox{\@bgcolorbin}}% + \end{flushleft}} +%%% Postlude +\makeatother diff --git a/doc/faq/interval_discr.v b/doc/faq/interval_discr.v new file mode 100644 index 00000000..972300da --- /dev/null +++ b/doc/faq/interval_discr.v @@ -0,0 +1,419 @@ +(** Sketch of the proof of {p:nat|p<=n} = {p:nat|p<=m} -> n=m + + - preliminary results on the irrelevance of boundedness proofs + - introduce the notion of finite cardinal |A| + - prove that |{p:nat|p<=n}| = n + - prove that |A| = n /\ |A| = m -> n = m if equality is decidable on A + - prove that equality is decidable on A + - conclude +*) + +(** * Preliminary results on [nat] and [le] *) + +(** Proving axiom K on [nat] *) + +Require Import Eqdep_dec. +Require Import Arith. + +Theorem eq_rect_eq_nat : + forall (p:nat) (Q:nat->Type) (x:Q p) (h:p=p), x = eq_rect p Q x p h. +Proof. +intros. +apply K_dec_set with (p := h). +apply eq_nat_dec. +reflexivity. +Qed. + +(** Proving unicity of proofs of [(n<=m)%nat] *) + +Scheme le_ind' := Induction for le Sort Prop. + +Theorem le_uniqueness_proof : forall (n m : nat) (p q : n <= m), p = q. +Proof. +induction p using le_ind'; intro q. + replace (le_n n) with + (eq_rect _ (fun n0 => n <= n0) (le_n n) _ (refl_equal n)). + 2:reflexivity. + generalize (refl_equal n). + pattern n at 2 4 6 10, q; case q; [intro | intros m l e]. + rewrite <- eq_rect_eq_nat; trivial. + contradiction (le_Sn_n m); rewrite <- e; assumption. + replace (le_S n m p) with + (eq_rect _ (fun n0 => n <= n0) (le_S n m p) _ (refl_equal (S m))). + 2:reflexivity. + generalize (refl_equal (S m)). + pattern (S m) at 1 3 4 6, q; case q; [intro Heq | intros m0 l HeqS]. + contradiction (le_Sn_n m); rewrite Heq; assumption. 
+ injection HeqS; intro Heq; generalize l HeqS. + rewrite <- Heq; intros; rewrite <- eq_rect_eq_nat. + rewrite (IHp l0); reflexivity. +Qed. + +(** Proving irrelevance of boundedness proofs while building + elements of interval *) + +Lemma dep_pair_intro : + forall (n x y:nat) (Hx : x<=n) (Hy : y<=n), x=y -> + exist (fun x => x <= n) x Hx = exist (fun x => x <= n) y Hy. +Proof. +intros n x y Hx Hy Heq. +generalize Hy. +rewrite <- Heq. +intros. +rewrite (le_uniqueness_proof x n Hx Hy0). +reflexivity. +Qed. + +(** * Proving that {p:nat|p<=n} = {p:nat|p<=m} -> n=m *) + +(** Definition of having finite cardinality [n+1] for a set [A] *) + +Definition card (A:Set) n := + exists f, + (forall x:A, f x <= n) /\ + (forall x y:A, f x = f y -> x = y) /\ + (forall m, m <= n -> exists x:A, f x = m). + +Require Import Arith. + +(** Showing that the interval [0;n] has cardinality [n+1] *) + +Theorem card_interval : forall n, card {x:nat|x<=n} n. +Proof. +intro n. +exists (fun x:{x:nat|x<=n} => proj1_sig x). +split. +(* bounded *) +intro x; apply (proj2_sig x). +split. +(* injectivity *) +intros (p,Hp) (q,Hq). +simpl. +intro Hpq. +apply dep_pair_intro; assumption. +(* surjectivity *) +intros m Hmn. +exists (exist (fun x : nat => x <= n) m Hmn). +reflexivity. +Qed. + +(** Showing that equality on the interval [0;n] is decidable *) + +Lemma interval_dec : + forall n (x y : {m:nat|m<=n}), {x=y}+{x<>y}. +Proof. +intros n (p,Hp). +induction p; intros ([|q],Hq). +left. + apply dep_pair_intro. + reflexivity. +right. + intro H; discriminate H. +right. + intro H; discriminate H. +assert (Hp' : p <= n). + apply le_Sn_le; assumption. +assert (Hq' : q <= n). + apply le_Sn_le; assumption. +destruct (IHp Hp' (exist (fun m => m <= n) q Hq')) + as [Heq|Hneq]. +left. + injection Heq; intro Heq'. + apply dep_pair_intro. + apply eq_S. + assumption. +right. + intro HeqS. + injection HeqS; intro Heq. + apply Hneq. + apply dep_pair_intro. + assumption. +Qed. 
+ +(** Showing that the cardinality relation is functional on decidable sets *) + +Lemma card_inj_aux : + forall (A:Type) f g n, + (forall x:A, f x <= 0) -> + (forall x y:A, f x = f y -> x = y) -> + (forall m, m <= S n -> exists x:A, g x = m) + -> False. +Proof. +intros A f g n Hfbound Hfinj Hgsurj. +destruct (Hgsurj (S n) (le_n _)) as (x,Hx). +destruct (Hgsurj n (le_S _ _ (le_n _))) as (x',Hx'). +assert (Hfx : 0 = f x). +apply le_n_O_eq. +apply Hfbound. +assert (Hfx' : 0 = f x'). +apply le_n_O_eq. +apply Hfbound. +assert (x=x'). +apply Hfinj. +rewrite <- Hfx. +rewrite <- Hfx'. +reflexivity. +rewrite H in Hx. +rewrite Hx' in Hx. +apply (n_Sn _ Hx). +Qed. + +(** For [dec_restrict], we use a lemma on the negation of equality +that requires proof-irrelevance. It should be possible to avoid this +lemma by generalizing over a first-order definition of [x<>y], say +[neq] such that [{x=y}+{neq x y}] and [~(x=y /\ neq x y)]; for such +[neq], unicity of proofs could be proven *) + + Require Import Classical. + Lemma neq_dep_intro : + forall (A:Set) (z x y:A) (p:x<>z) (q:y<>z), x=y -> + exist (fun x => x <> z) x p = exist (fun x => x <> z) y q. + Proof. + intros A z x y p q Heq. + generalize q; clear q; rewrite <- Heq; intro q. + rewrite (proof_irrelevance _ p q); reflexivity. + Qed. + +Lemma dec_restrict : + forall (A:Set), + (forall x y :A, {x=y}+{x<>y}) -> + forall z (x y :{a:A|a<>z}), {x=y}+{x<>y}. +Proof. +intros A Hdec z (x,Hx) (y,Hy). +destruct (Hdec x y) as [Heq|Hneq]. +left; apply neq_dep_intro; assumption. +right; intro Heq; injection Heq; exact Hneq. +Qed. + +Lemma pred_inj : forall n m, + 0 <> n -> 0 <> m -> pred m = pred n -> m = n. +Proof. +destruct n. +intros m H; destruct H; reflexivity. +destruct m. +intros _ H; destruct H; reflexivity. +simpl; intros _ _ H. +rewrite H. +reflexivity. +Qed. + +Lemma le_neq_lt : forall n m, n <= m -> n<>m -> n < m. +Proof. +intros n m Hle Hneq. +destruct (le_lt_eq_dec n m Hle). +assumption. +contradiction. +Qed. 
+ +Lemma inj_restrict : + forall (A:Set) (f:A->nat) x y z, + (forall x y : A, f x = f y -> x = y) + -> x <> z -> f y < f z -> f z <= f x + -> pred (f x) = f y + -> False. + +(* Search error sans le type de f !! *) +Proof. +intros A f x y z Hfinj Hneqx Hfy Hfx Heq. +assert (f z <> f x). + apply sym_not_eq. + intro Heqf. + apply Hneqx. + apply Hfinj. + assumption. +assert (f x = S (f y)). + assert (0 < f x). + apply le_lt_trans with (f z). + apply le_O_n. + apply le_neq_lt; assumption. + apply pred_inj. + apply O_S. + apply lt_O_neq; assumption. + exact Heq. +assert (f z <= f y). +destruct (le_lt_or_eq _ _ Hfx). + apply lt_n_Sm_le. + rewrite <- H0. + assumption. + contradiction Hneqx. + symmetry. + apply Hfinj. + assumption. +contradiction (lt_not_le (f y) (f z)). +Qed. + +Theorem card_inj : forall m n (A:Set), + (forall x y :A, {x=y}+{x<>y}) -> + card A m -> card A n -> m = n. +Proof. +induction m; destruct n; +intros A Hdec + (f,(Hfbound,(Hfinj,Hfsurj))) + (g,(Hgbound,(Hginj,Hgsurj))). +(* 0/0 *) +reflexivity. +(* 0/Sm *) +destruct (card_inj_aux _ _ _ _ Hfbound Hfinj Hgsurj). +(* Sn/0 *) +destruct (card_inj_aux _ _ _ _ Hgbound Hginj Hfsurj). +(* Sn/Sm *) +destruct (Hgsurj (S n) (le_n _)) as (xSn,HSnx). +rewrite IHm with (n:=n) (A := {x:A|x<>xSn}). +reflexivity. +(* decidability of eq on {x:A|x<>xSm} *) +apply dec_restrict. +assumption. +(* cardinality of {x:A|x<>xSn} is m *) +pose (f' := fun x' : {x:A|x<>xSn} => + let (x,Hneq) := x' in + if le_lt_dec (f xSn) (f x) + then pred (f x) + else f x). +exists f'. +split. +(* f' is bounded *) +unfold f'. +intros (x,_). +destruct (le_lt_dec (f xSn) (f x)) as [Hle|Hge]. +change m with (pred (S m)). +apply le_pred. +apply Hfbound. +apply le_S_n. +apply le_trans with (f xSn). +exact Hge. +apply Hfbound. +split. +(* f' is injective *) +unfold f'. +intros (x,Hneqx) (y,Hneqy) Heqf'. +destruct (le_lt_dec (f xSn) (f x)) as [Hlefx|Hgefx]; +destruct (le_lt_dec (f xSn) (f y)) as [Hlefy|Hgefy]. 
+(* f xSn <= f x et f xSn <= f y *) +assert (Heq : x = y). + apply Hfinj. + assert (f xSn <> f y). + apply sym_not_eq. + intro Heqf. + apply Hneqy. + apply Hfinj. + assumption. + assert (0 < f y). + apply le_lt_trans with (f xSn). + apply le_O_n. + apply le_neq_lt; assumption. + assert (f xSn <> f x). + apply sym_not_eq. + intro Heqf. + apply Hneqx. + apply Hfinj. + assumption. + assert (0 < f x). + apply le_lt_trans with (f xSn). + apply le_O_n. + apply le_neq_lt; assumption. + apply pred_inj. + apply lt_O_neq; assumption. + apply lt_O_neq; assumption. + assumption. +apply neq_dep_intro; assumption. +(* f y < f xSn <= f x *) +destruct (inj_restrict A f x y xSn); assumption. +(* f x < f xSn <= f y *) +symmetry in Heqf'. +destruct (inj_restrict A f y x xSn); assumption. +(* f x < f xSn et f y < f xSn *) +assert (Heq : x=y). + apply Hfinj; assumption. +apply neq_dep_intro; assumption. +(* f' is surjective *) +intros p Hlep. +destruct (le_lt_dec (f xSn) p) as [Hle|Hlt]. +(* case f xSn <= p *) +destruct (Hfsurj (S p) (le_n_S _ _ Hlep)) as (x,Hx). +assert (Hneq : x <> xSn). + intro Heqx. + rewrite Heqx in Hx. + rewrite Hx in Hle. + apply le_Sn_n with p; assumption. +exists (exist (fun a => a<>xSn) x Hneq). +unfold f'. +destruct (le_lt_dec (f xSn) (f x)) as [Hle'|Hlt']. +rewrite Hx; reflexivity. +rewrite Hx in Hlt'. +contradiction (le_not_lt (f xSn) p). +apply lt_trans with (S p). +apply lt_n_Sn. +assumption. +(* case p < f xSn *) +destruct (Hfsurj p (le_S _ _ Hlep)) as (x,Hx). +assert (Hneq : x <> xSn). + intro Heqx. + rewrite Heqx in Hx. + rewrite Hx in Hlt. + apply (lt_irrefl p). + assumption. +exists (exist (fun a => a<>xSn) x Hneq). +unfold f'. +destruct (le_lt_dec (f xSn) (f x)) as [Hle'|Hlt']. + rewrite Hx in Hle'. + contradiction (lt_irrefl p). + apply lt_le_trans with (f xSn); assumption. + assumption. +(* cardinality of {x:A|x<>xSn} is n *) +pose (g' := fun x' : {x:A|x<>xSn} => + let (x,Hneq) := x' in + if Hdec x xSn then 0 else g x). +exists g'. +split. 
+(* g is bounded *) +unfold g'. +intros (x,_). +destruct (Hdec x xSn) as [_|Hneq]. +apply le_O_n. +assert (Hle_gx:=Hgbound x). +destruct (le_lt_or_eq _ _ Hle_gx). +apply lt_n_Sm_le. +assumption. +contradiction Hneq. +apply Hginj. +rewrite HSnx. +assumption. +split. +(* g is injective *) +unfold g'. +intros (x,Hneqx) (y,Hneqy) Heqg'. +destruct (Hdec x xSn) as [Heqx|_]. +contradiction Hneqx. +destruct (Hdec y xSn) as [Heqy|_]. +contradiction Hneqy. +assert (Heq : x=y). + apply Hginj; assumption. +apply neq_dep_intro; assumption. +(* g is surjective *) +intros p Hlep. +destruct (Hgsurj p (le_S _ _ Hlep)) as (x,Hx). +assert (Hneq : x<>xSn). + intro Heq. + rewrite Heq in Hx. + rewrite Hx in HSnx. + rewrite HSnx in Hlep. + contradiction (le_Sn_n _ Hlep). +exists (exist (fun a => a<>xSn) x Hneq). +simpl. +destruct (Hdec x xSn) as [Heqx|_]. +contradiction Hneq. +assumption. +Qed. + +(** Conclusion *) + +Theorem interval_discr : + forall n m, {p:nat|p<=n} = {p:nat|p<=m} -> n=m. +Proof. +intros n m Heq. +apply card_inj with (A := {p:nat|p<=n}). +apply interval_dec. +apply card_interval. +rewrite Heq. +apply card_interval. +Qed. diff --git a/doc/refman/AddRefMan-pre.tex b/doc/refman/AddRefMan-pre.tex new file mode 100644 index 00000000..5312b8fc --- /dev/null +++ b/doc/refman/AddRefMan-pre.tex @@ -0,0 +1,58 @@ +%\coverpage{Addendum to the Reference Manual}{\ } +%\addcontentsline{toc}{part}{Additional documentation} +\setheaders{Presentation of the Addendum} +\chapter*{Presentation of the Addendum} + +Here you will find several pieces of additional documentation for the +\Coq\ Reference Manual. Each of this chapters is concentrated on a +particular topic, that should interest only a fraction of the \Coq\ +users: that's the reason why they are apart from the Reference +Manual. + +\begin{description} + +\item[Extended pattern-matching] This chapter details the use of + generalized pattern-matching. It is contributed by Cristina Cornes + and Hugo Herbelin. 
+ +\item[Implicit coercions] This chapter details the use of the coercion + mechanism. It is contributed by Amokrane Saïbi. + +%\item[Proof of imperative programs] This chapter explains how to +% prove properties of annotated programs with imperative features. +% It is contributed by Jean-Christophe Filliâtre + +\item[Program extraction] This chapter explains how to extract in practice ML + files from $\FW$ terms. It is contributed by Jean-Christophe + Filliâtre and Pierre Letouzey. + +%\item[Natural] This chapter is due to Yann Coscoy. It is the user +% manual of the tools he wrote for printing proofs in natural +% language. At this time, French and English languages are supported. + +\item[omega] \texttt{omega}, written by Pierre Crégut, solves a whole + class of arithmetic problems. + +%\item[Program] The \texttt{Program} technology intends to inverse the +% extraction mechanism. It allows the developments of certified +% programs in \Coq. This chapter is due to Catherine Parent. {\bf This +% feature is not available in {\Coq} version 7.} + +\item[The {\tt ring} tactic] This is a tactic to do AC rewriting. This + chapter explains how to use it and how it works. + The chapter is contributed by Patrick Loiseleur. + +\item[The {\tt Setoid\_replace} tactic] This is a + tactic to do rewriting on types equipped with specific (only partially + substitutive) equality. The chapter is contributed by Clément Renard. + + +\end{description} + +\atableofcontents + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Cases.tex b/doc/refman/Cases.tex new file mode 100644 index 00000000..95411afa --- /dev/null +++ b/doc/refman/Cases.tex @@ -0,0 +1,698 @@ +\achapter{Extended pattern-matching}\defaultheaders +\aauthor{Cristina Cornes} + +\label{Mult-match-full} +\ttindex{Cases} +\index{ML-like patterns} + +This section describes the full form of pattern-matching in {\Coq} terms. 
+ +\asection{Patterns}\label{implementation} The full syntax of {\tt +match} is presented in figures~\ref{term-syntax} +and~\ref{term-syntax-aux}. Identifiers in patterns are either +constructor names or variables. Any identifier that is not the +constructor of an inductive or coinductive type is considered to be a +variable. A variable name cannot occur more than once in a given +pattern. It is recommended to start variable names by a lowercase +letter. + +If a pattern has the form $(c~\vec{x})$ where $c$ is a constructor +symbol and $\vec{x}$ is a linear vector of variables, it is called +{\em simple}: it is the kind of pattern recognized by the basic +version of {\tt match}. If a pattern is +not simple we call it {\em nested}. + +A variable pattern matches any value, and the identifier is bound to +that value. The pattern ``\texttt{\_}'' (called ``don't care'' or +``wildcard'' symbol) also matches any value, but does not bind anything. It +may occur an arbitrary number of times in a pattern. Alias patterns +written \texttt{(}{\sl pattern} \texttt{as} {\sl identifier}\texttt{)} are +also accepted. This pattern matches the same values as {\sl pattern} +does and {\sl identifier} is bound to the matched value. A list of +patterns separated with commas +is also considered as a pattern and is called {\em multiple +pattern}. + +Since extended {\tt match} expressions are compiled into the primitive +ones, the expressiveness of the theory remains the same. Once the +stage of parsing has finished only simple patterns remain. An easy way +to see the result of the expansion is by printing the term with +\texttt{Print} if the term is a constant, or using the command +\texttt{Check}. + +The extended \texttt{match} still accepts an optional {\em elimination +predicate} given after the keyword \texttt{return}. 
Given a pattern +matching expression, if all the right hand sides of \texttt{=>} ({\em +rhs} in short) have the same type, then this type can be sometimes +synthesized, and so we can omit the \texttt{return} part. Otherwise +the predicate after \texttt{return} has to be provided, like for the basic +\texttt{match}. + +Let us illustrate through examples the different aspects of extended +pattern matching. Consider for example the function that computes the +maximum of two natural numbers. We can write it in primitive syntax +by: + +\begin{coq_example} +Fixpoint max (n m:nat) {struct m} : nat := + match n with + | O => m + | S n' => match m with + | O => S n' + | S m' => S (max n' m') + end + end. +\end{coq_example} + +Using multiple patterns in the definition allows to write: + +\begin{coq_example} +Reset max. +Fixpoint max (n m:nat) {struct m} : nat := + match n, m with + | O, _ => m + | S n', O => S n' + | S n', S m' => S (max n' m') + end. +\end{coq_example} + +which will be compiled into the previous form. + +The pattern-matching compilation strategy examines patterns from left +to right. A \texttt{match} expression is generated {\bf only} when +there is at least one constructor in the column of patterns. E.g. the +following example does not build a \texttt{match} expression. + +\begin{coq_example} +Check (fun x:nat => match x return nat with + | y => y + end). +\end{coq_example} + +We can also use ``\texttt{as} patterns'' to associate a name to a +sub-pattern: + +\begin{coq_example} +Reset max. +Fixpoint max (n m:nat) {struct n} : nat := + match n, m with + | O, _ => m + | S n' as p, O => p + | S n', S m' => S (max n' m') + end. +\end{coq_example} + +Here is now an example of nested patterns: + +\begin{coq_example} +Fixpoint even (n:nat) : bool := + match n with + | O => true + | S O => false + | S (S n') => even n' + end. +\end{coq_example} + +This is compiled into: + +\begin{coq_example} +Print even. 
+\end{coq_example}
+
+In the previous examples patterns do not overlap, but
+sometimes it is convenient to write patterns that admit a non
+trivial superposition. Consider
+the boolean function \texttt{lef} that given two natural numbers
+yields \texttt{true} if the first one is less than or equal to the second
+one and \texttt{false} otherwise. We can write it as follows:
+
+\begin{coq_example}
+Fixpoint lef (n m:nat) {struct m} : bool :=
+  match n, m with
+  | O, x => true
+  | x, O => false
+  | S n, S m => lef n m
+  end.
+\end{coq_example}
+
+Note that the first and the second multiple pattern superpose because
+the pair of values \texttt{O O} matches both. Thus, what is the result
+of the function on those values? To eliminate ambiguity we use the
+{\em textual priority rule}: we consider patterns ordered from top to
+bottom, then a value is matched by the pattern at the $i$th row if and
+only if it is not matched by any pattern of a previous row. Thus in the
+example,
+\texttt{O O} is matched by the first pattern, and so \texttt{(lef O O)}
+yields \texttt{true}.
+
+Another way to write this function is:
+
+\begin{coq_example}
+Reset lef.
+Fixpoint lef (n m:nat) {struct m} : bool :=
+  match n, m with
+  | O, x => true
+  | S n, S m => lef n m
+  | _, _ => false
+  end.
+\end{coq_example}
+
+
+Here the last pattern superposes with the first two. Because
+of the priority rule, the last pattern
+will be used only for values that match neither the first nor
+the second one.
+
+Terms with useless patterns are not accepted by the
+system. Here is an example:
+% Test failure
+\begin{coq_eval}
+Set Printing Depth 50.
+   (********** The following is not correct and should produce **********)
+   (**************** Error: This clause is redundant ********************)
+\end{coq_eval}
+\begin{coq_example}
+Check (fun x:nat =>
+         match x with
+         | O => true
+         | S _ => false
+         | x => true
+         end).
+\end{coq_example} + +\asection{About patterns of parametric types} +When matching objects of a parametric type, constructors in patterns +{\em do not expect} the parameter arguments. Their value is deduced +during expansion. + +Consider for example the polymorphic lists: + +\begin{coq_example} +Inductive List (A:Set) : Set := + | nil : List A + | cons : A -> List A -> List A. +\end{coq_example} + +We can check the function {\em tail}: + +\begin{coq_example} +Check + (fun l:List nat => + match l with + | nil => nil nat + | cons _ l' => l' + end). +\end{coq_example} + + +When we use parameters in patterns there is an error message: +% Test failure +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is not correct and should produce **********) +(******** Error: The constructor cons expects 2 arguments ************) +\end{coq_eval} +\begin{coq_example} +Check + (fun l:List nat => + match l with + | nil A => nil nat + | cons A _ l' => l' + end). +\end{coq_example} + + + +\asection{Matching objects of dependent types} +The previous examples illustrate pattern matching on objects of +non-dependent types, but we can also +use the expansion strategy to destructure objects of dependent type. +Consider the type \texttt{listn} of lists of a certain length: + +\begin{coq_example} +Inductive listn : nat -> Set := + | niln : listn 0 + | consn : forall n:nat, nat -> listn n -> listn (S n). +\end{coq_example} + +\asubsection{Understanding dependencies in patterns} +We can define the function \texttt{length} over \texttt{listn} by: + +\begin{coq_example} +Definition length (n:nat) (l:listn n) := n. +\end{coq_example} + +Just for illustrating pattern matching, +we can define it by case analysis: + +\begin{coq_example} +Reset length. +Definition length (n:nat) (l:listn n) := + match l with + | niln => 0 + | consn n _ _ => S n + end. +\end{coq_example} + +We can understand the meaning of this definition using the +same notions of usual pattern matching. 
+ +% +% Constraining of dependencies is not longer valid in V7 +% +\iffalse +Now suppose we split the second pattern of \texttt{length} into two +cases so to give an +alternative definition using nested patterns: +\begin{coq_example} +Definition length1 (n:nat) (l:listn n) := + match l with + | niln => 0 + | consn n _ niln => S n + | consn n _ (consn _ _ _) => S n + end. +\end{coq_example} + +It is obvious that \texttt{length1} is another version of +\texttt{length}. We can also give the following definition: +\begin{coq_example} +Definition length2 (n:nat) (l:listn n) := + match l with + | niln => 0 + | consn n _ niln => 1 + | consn n _ (consn m _ _) => S (S m) + end. +\end{coq_example} + +If we forget that \texttt{listn} is a dependent type and we read these +definitions using the usual semantics of pattern matching, we can conclude +that \texttt{length1} +and \texttt{length2} are different functions. +In fact, they are equivalent +because the pattern \texttt{niln} implies that \texttt{n} can only match +the value $0$ and analogously the pattern \texttt{consn} determines that \texttt{n} can +only match values of the form $(S~v)$ where $v$ is the value matched by +\texttt{m}. + +The converse is also true. If +we destructure the length value with the pattern \texttt{O} then the list +value should be $niln$. +Thus, the following term \texttt{length3} corresponds to the function +\texttt{length} but this time defined by case analysis on the dependencies instead of on the list: + +\begin{coq_example} +Definition length3 (n:nat) (l:listn n) := + match l with + | niln => 0 + | consn O _ _ => 1 + | consn (S n) _ _ => S (S n) + end. +\end{coq_example} + +When we have nested patterns of dependent types, the semantics of +pattern matching becomes a little more difficult because +the set of values that are matched by a sub-pattern may be conditioned by the +values matched by another sub-pattern. Dependent nested patterns are +somehow constrained patterns. 
+In the examples, the expansion of +\texttt{length1} and \texttt{length2} yields exactly the same term + but the +expansion of \texttt{length3} is completely different. \texttt{length1} and +\texttt{length2} are expanded into two nested case analysis on +\texttt{listn} while \texttt{length3} is expanded into a case analysis on +\texttt{listn} containing a case analysis on natural numbers inside. + + +In practice the user can think about the patterns as independent and +it is the expansion algorithm that cares to relate them. \\ +\fi +% +% +% + +\asubsection{When the elimination predicate must be provided} +The examples given so far do not need an explicit elimination predicate + because all the rhs have the same type and the +strategy succeeds to synthesize it. +Unfortunately when dealing with dependent patterns it often happens +that we need to write cases where the type of the rhs are +different instances of the elimination predicate. +The function \texttt{concat} for \texttt{listn} +is an example where the branches have different type +and we need to provide the elimination predicate: + +\begin{coq_example} +Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : + listn (n + m) := + match l in listn n return listn (n + m) with + | niln => l' + | consn n' a y => consn (n' + m) a (concat n' y m l') + end. +\end{coq_example} +The elimination predicate is {\tt fun (n:nat) (l:listn n) => listn~(n+m)}. +In general if $m$ has type $(I~q_1\ldots q_r~t_1\ldots t_s)$ where +$q_1\ldots q_r$ are parameters, the elimination predicate should be of +the form~: +{\tt fun $y_1$\ldots $y_s$ $x$:($I$~$q_1$\ldots $q_r$~$y_1$\ldots + $y_s$) => P}. + +In the concrete syntax, it should be written~: +\[ \kw{match}~m~\kw{as}~x~\kw{in}~(I~\_\ldots \_~y_1\ldots y_s)~\kw{return}~Q~\kw{with}~\ldots~\kw{end}\] + +The variables which appear in the \kw{in} and \kw{as} clause are new +and bounded in the property $Q$ in the \kw{return} clause. 
The +parameters of the inductive definitions should not be mentioned and +are replaced by \kw{\_}. + +Recall that a list of patterns is also a pattern. So, when +we destructure several terms at the same time and the branches have +different type we need to provide +the elimination predicate for this multiple pattern. +It is done using the same scheme, each term may be associated to an +\kw{as} and \kw{in} clause in order to introduce a dependent product. + +For example, an equivalent definition for \texttt{concat} (even though the matching on the second term is trivial) would have +been: + +\begin{coq_example} +Reset concat. +Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : + listn (n + m) := + match l in listn n, l' return listn (n + m) with + | niln, x => x + | consn n' a y, x => consn (n' + m) a (concat n' y m x) + end. +\end{coq_example} + +% Notice that this time, the predicate \texttt{[n,\_:nat](listn (plus n +% m))} is binary because we +% destructure both \texttt{l} and \texttt{l'} whose types have arity one. +% In general, if we destructure the terms $e_1\ldots e_n$ +% the predicate will be of arity $m$ where $m$ is the sum of the +% number of dependencies of the type of $e_1, e_2,\ldots e_n$ +% (the $\lambda$-abstractions +% should correspond from left to right to each dependent argument of the +% type of $e_1\ldots e_n$). +When the arity of the predicate (i.e. number of abstractions) is not +correct Coq raises an error message. For example: + +% Test failure +\begin{coq_eval} +Reset concat. +Set Printing Depth 50. +(********** The following is not correct and should produce ***********) +(** Error: the term l' has type listn m while it is expected to have **) +(** type listn (?31 + ?32) **) +\end{coq_eval} +\begin{coq_example} +Fixpoint concat + (n:nat) (l:listn n) (m:nat) + (l':listn m) {struct l} : listn (n + m) := + match l, l' with + | niln, x => x + | consn n' a y, x => consn (n' + m) a (concat n' y m x) + end. 
+\end{coq_example} + +\asection{Using pattern matching to write proofs} +In all the previous examples the elimination predicate does not depend +on the object(s) matched. But it may depend and the typical case +is when we write a proof by induction or a function that yields an +object of dependent type. An example of proof using \texttt{match} in +given in section \ref{refine-example} + +For example, we can write +the function \texttt{buildlist} that given a natural number +$n$ builds a list of length $n$ containing zeros as follows: + +\begin{coq_example} +Fixpoint buildlist (n:nat) : listn n := + match n return listn n with + | O => niln + | S n => consn n 0 (buildlist n) + end. +\end{coq_example} + +We can also use multiple patterns. +Consider the following definition of the predicate less-equal +\texttt{Le}: + +\begin{coq_example} +Inductive LE : nat -> nat -> Prop := + | LEO : forall n:nat, LE 0 n + | LES : forall n m:nat, LE n m -> LE (S n) (S m). +\end{coq_example} + +We can use multiple patterns to write the proof of the lemma + \texttt{(n,m:nat) (LE n m)}\verb=\/=\texttt{(LE m n)}: + +\begin{coq_example} +Fixpoint dec (n m:nat) {struct n} : LE n m \/ LE m n := + match n, m return LE n m \/ LE m n with + | O, x => or_introl (LE x 0) (LEO x) + | x, O => or_intror (LE x 0) (LEO x) + | S n as n', S m as m' => + match dec n m with + | or_introl h => or_introl (LE m' n') (LES n m h) + | or_intror h => or_intror (LE n' m') (LES m n h) + end + end. +\end{coq_example} +In the example of \texttt{dec}, +the first \texttt{match} is dependent while +the second is not. + +% In general, consider the terms $e_1\ldots e_n$, +% where the type of $e_i$ is an instance of a family type +% $\lb (\vec{d_i}:\vec{D_i}) \mto T_i$ ($1\leq i +% \leq n$). 
Then, in expression \texttt{match} $e_1,\ldots, +% e_n$ \texttt{of} \ldots \texttt{end}, the +% elimination predicate ${\cal P}$ should be of the form: +% $[\vec{d_1}:\vec{D_1}][x_1:T_1]\ldots [\vec{d_n}:\vec{D_n}][x_n:T_n]Q.$ + +The user can also use \texttt{match} in combination with the tactic +\texttt{refine} (see section \ref{refine}) to build incomplete proofs +beginning with a \texttt{match} construction. + +\asection{Pattern-matching on inductive objects involving local +definitions} + +If local definitions occur in the type of a constructor, then there +are two ways to match on this constructor. Either the local +definitions are skipped and matching is done only on the true arguments +of the constructors, or the bindings for local definitions can also +be caught in the matching. + +Example. + +\begin{coq_eval} +Reset Initial. +Require Import Arith. +\end{coq_eval} + +\begin{coq_example*} +Inductive list : nat -> Set := + | nil : list 0 + | cons : forall n:nat, let m := (2 * n) in list m -> list (S (S m)). +\end{coq_example*} + +In the next example, the local definition is not caught. + +\begin{coq_example} +Fixpoint length n (l:list n) {struct l} : nat := + match l with + | nil => 0 + | cons n l0 => S (length (2 * n) l0) + end. +\end{coq_example} + +But in this example, it is. + +\begin{coq_example} +Fixpoint length' n (l:list n) {struct l} : nat := + match l with + | nil => 0 + | cons _ m l0 => S (length' m l0) + end. +\end{coq_example} + +\Rem for a given matching clause, either none of the local +definitions or all of them can be caught. + +\asection{Pattern-matching and coercions} + +If a mismatch occurs between the expected type of a pattern and its +actual type, a coercion made from constructors is sought. If such a +coercion can be found, it is automatically inserted around the +pattern. + +Example: + +\begin{coq_example} +Inductive I : Set := + | C1 : nat -> I + | C2 : I -> I. +Coercion C1 : nat >-> I. 
+Check (fun x => match x with
+                | C2 O => 0
+                | _ => 0
+                end).
+\end{coq_example}
+
+
+\asection{When does the expansion strategy fail ?}\label{limitations}
+The strategy works much as it does in ML languages when treating
+patterns of non-dependent type.
+But there are new cases of failure that are due to the presence of
+dependencies.
+
+The error messages of the current implementation may sometimes be
+confusing. When the tactic fails because patterns are somehow
+incorrect, the error messages refer to the initial expression. But the
+strategy may succeed in building an expression whose sub-expressions are
+well typed when the whole expression is not. In this situation the
+message refers to the expanded expression. We encourage
+users, when they have patterns with the same outer constructor in
+different equations, to name the variable patterns in the same
+positions with the same name.
+E.g. to write {\small\texttt{(cons n O x) => e1}}
+and {\small\texttt{(cons n \_ x) => e2}} instead of
+{\small\texttt{(cons n O x) => e1}} and
+{\small\texttt{(cons n' \_ x') => e2}}.
+This helps to maintain a certain name correspondence between the
+generated expression and the original.
+
+Here is a summary of the error messages corresponding to each situation:
+
+\begin{ErrMsgs}
+\item \sverb{The constructor } {\sl
+    ident} \sverb{expects } {\sl num} \sverb{arguments}
+
+  \sverb{The variable } {\sl ident} \sverb{is bound several times
+    in pattern } {\sl term}
+
+  \sverb{Found a constructor of inductive type} {\term}
+  \sverb{while a constructor of} {\term} \sverb{is expected}
+
+  Patterns are incorrect (because constructors are not applied to
+  the correct number of arguments, because they are not linear or
+  they are wrongly typed)
+
+\item \errindex{Non exhaustive pattern-matching}
+
+the pattern matching is not exhaustive
+
+\item \sverb{The elimination predicate } {\sl term} \sverb{should be
+    of arity } {\sl num} \sverb{(for non dependent case) or } {\sl
+    num} \sverb{(for dependent case)}
+
+The elimination predicate provided to \texttt{match} does not have the
+  expected arity
+
+
+%\item the whole expression is wrongly typed
+
+% CADUC ?
+% , or the synthesis of
+% implicit arguments fails (for example to find the elimination
+% predicate or to resolve implicit arguments in the rhs).
+
+% There are {\em nested patterns of dependent type}, the elimination
+% predicate corresponds to non-dependent case and has the form
+% $[x_1:T_1]...[x_n:T_n]T$ and {\bf some} $x_i$ occurs {\bf free} in
+% $T$. Then, the strategy may fail to find out a correct elimination
+% predicate during some step of compilation. In this situation we
+% recommend the user to rewrite the nested dependent patterns into
+% several \texttt{match} with {\em simple patterns}.
+
+\item {\tt Unable to infer a match predicate\\
+    Either there is a type incompatiblity or the problem involves\\
+    dependencies}
+
+  There is a type mismatch between the different branches
+
+  Then the user should provide an elimination predicate.
+
+% Obsolete ?
+% \item because of nested patterns, it may happen that even though all +% the rhs have the same type, the strategy needs dependent elimination +% and so an elimination predicate must be provided. The system warns +% about this situation, trying to compile anyway with the +% non-dependent strategy. The risen message is: + +% \begin{itemize} +% \item {\tt Warning: This pattern matching may need dependent +% elimination to be compiled. I will try, but if fails try again +% giving dependent elimination predicate.} +% \end{itemize} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % LA PROPAGATION DES CONTRAINTES ARRIERE N'EST PAS FAITE DANS LA V7 +% TODO +% \item there are {\em nested patterns of dependent type} and the +% strategy builds a term that is well typed but recursive calls in fix +% point are reported as illegal: +% \begin{itemize} +% \item {\tt Error: Recursive call applied to an illegal term ...} +% \end{itemize} + +% This is because the strategy generates a term that is correct w.r.t. +% the initial term but which does not pass the guard condition. In +% this situation we recommend the user to transform the nested dependent +% patterns into {\em several \texttt{match} of simple patterns}. Let us +% explain this with an example. Consider the following definition of a +% function that yields the last element of a list and \texttt{O} if it is +% empty: + +% \begin{coq_example} +% Fixpoint last [n:nat; l:(listn n)] : nat := +% match l of +% (consn _ a niln) => a +% | (consn m _ x) => (last m x) | niln => O +% end. +% \end{coq_example} + +% It fails because of the priority between patterns, we know that this +% definition is equivalent to the following more explicit one (which +% fails too): + +% \begin{coq_example*} +% Fixpoint last [n:nat; l:(listn n)] : nat := +% match l of +% (consn _ a niln) => a +% | (consn n _ (consn m b x)) => (last n (consn m b x)) +% | niln => O +% end. 
+% \end{coq_example*} + +% Note that the recursive call {\tt (last n (consn m b x))} is not +% guarded. When treating with patterns of dependent types the strategy +% interprets the first definition of \texttt{last} as the second +% one\footnote{In languages of the ML family the first definition would +% be translated into a term where the variable \texttt{x} is shared in +% the expression. When patterns are of non-dependent types, Coq +% compiles as in ML languages using sharing. When patterns are of +% dependent types the compilation reconstructs the term as in the +% second definition of \texttt{last} so to ensure the result of +% expansion is well typed.}. Thus it generates a term where the +% recursive call is rejected by the guard condition. + +% You can get rid of this problem by writing the definition with +% \emph{simple patterns}: + +% \begin{coq_example} +% Fixpoint last [n:nat; l:(listn n)] : nat := +% <[_:nat]nat>match l of +% (consn m a x) => Cases x of niln => a | _ => (last m x) end +% | niln => O +% end. +% \end{coq_example} + +\end{ErrMsgs} + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Coercion.tex b/doc/refman/Coercion.tex new file mode 100644 index 00000000..5445224b --- /dev/null +++ b/doc/refman/Coercion.tex @@ -0,0 +1,541 @@ +\achapter{Implicit Coercions} +\aauthor{Amokrane Saïbi} + +\label{Coercions-full} +\index{Coercions!presentation} + +\asection{General Presentation} + +This section describes the inheritance mechanism of {\Coq}. In {\Coq} with +inheritance, we are not interested in adding any expressive power to +our theory, but only convenience. Given a term, possibly not typable, +we are interested in the problem of determining if it can be well +typed modulo insertion of appropriate coercions. We allow to write: + +\begin{itemize} +\item $f~a$ where $f:forall~ x:A, B$ and $a:A'$ when $A'$ can + be seen in some sense as a subtype of $A$. 
+\item $x:A$ when $A$ is not a type, but can be seen in + a certain sense as a type: set, group, category etc. +\item $f~a$ when $f$ is not a function, but can be seen in a certain sense + as a function: bijection, functor, any structure morphism etc. +\end{itemize} + +\asection{Classes} +\index{Coercions!classes} + A class with $n$ parameters is any defined name with a type +$forall~ (x_1:A_1)..(x_n:A_n), s$ where $s$ is a sort. Thus a class with +parameters is considered as a single class and not as a family of +classes. An object of a class $C$ is any term of type $C~t_1 +.. t_n$. In addition to these user-classes, we have two abstract +classes: + +\begin{itemize} +\item {\tt Sortclass}, the class of sorts; + its objects are the terms whose type is a sort. +\item {\tt Funclass}, the class of functions; + its objects are all the terms with a functional + type, i.e. of form $forall~ x:A, B$. +\end{itemize} + +Formally, the syntax of a classes is defined on Figure~\ref{fig:classes}. +\begin{figure} +\begin{centerframe} +\begin{tabular}{lcl} +{\class} & ::= & {\qualid} \\ + & $|$ & {\tt Sortclass} \\ + & $|$ & {\tt Funclass} +\end{tabular} +\end{centerframe} +\caption{Syntax of classes} +\label{fig:classes} +\end{figure} + +\asection{Coercions} +\index{Coercions!Funclass} +\index{Coercions!Sortclass} + A name $f$ can be declared as a coercion between a source user-class +$C$ with $n$ parameters and a target class $D$ if one of these +conditions holds: + +\newcommand{\oftype}{\!:\!} + +\begin{itemize} +\item $D$ is a user-class, then the type of $f$ must have the form + $forall~ (x_1 \oftype A_1)..(x_n \oftype A_n)(y\oftype C~x_1..x_n), D~u_1..u_m$ where $m$ + is the number of parameters of $D$. +\item $D$ is {\tt Funclass}, then the type of $f$ must have the form + $forall~ (x_1\oftype A_1)..(x_n\oftype A_n)(y\oftype C~x_1..x_n)(x:A), B$. 
+\item $D$ is {\tt Sortclass}, then the type of $f$ must have the form
+  $forall~ (x_1\oftype A_1)..(x_n\oftype A_n)(y\oftype C~x_1..x_n), s$ with $s$ a sort.
+\end{itemize}
+
+We then write $f:C \mbox{\texttt{>->}} D$. The restriction on the type
+of coercions is called {\em the uniform inheritance condition}.
+Remark that the abstract classes {\tt Funclass} and {\tt Sortclass}
+cannot be source classes.
+
+To coerce an object $t:C~t_1..t_n$ of $C$ towards $D$, we have to
+apply the coercion $f$ to it; the obtained term $f~t_1..t_n~t$ is
+then an object of $D$.
+
+\asection{Identity Coercions}
+\index{Coercions!identity}
+
+ Identity coercions are special cases of coercions used to go around
+the uniform inheritance condition. Let $C$ and $D$ be two classes
+with respectively $n$ and $m$ parameters and
+$f:forall~(x_1:T_1)..(x_k:T_k)(y:C~u_1..u_n), D~v_1..v_m$ a function which
+does not verify the uniform inheritance condition. To declare $f$ as
+coercion, one has first to declare a subclass $C'$ of $C$:
+
+$$C' := fun~ (x_1:T_1)..(x_k:T_k) => C~u_1..u_n$$
+
+\noindent We then define an {\em identity coercion} between $C'$ and $C$:
+\begin{eqnarray*}
+Id\_C'\_C & := & fun~ (x_1:T_1)..(x_k:T_k)(y:C'~x_1..x_k) => (y:C~u_1..u_n)\\
+\end{eqnarray*}
+
+We can now declare $f$ as coercion from $C'$ to $D$, since we can
+``cast'' its type as
+$forall~ (x_1:T_1)..(x_k:T_k)(y:C'~x_1..x_k),D~v_1..v_m$.\\ The identity
+coercions have a special status: to coerce an object $t:C'~t_1..t_k$
+of $C'$ towards $C$, we do not have to insert explicitly $Id\_C'\_C$
+since $Id\_C'\_C~t_1..t_k~t$ is convertible with $t$. However we
+``rewrite'' the type of $t$ to become an object of $C$; in this case,
+it becomes $C~u_1^*..u_k^*$ where each $u_i^*$ is the result of the
+substitution in $u_i$ of the variables $x_j$ by $t_j$.
+
+
+\asection{Inheritance Graph}
+\index{Coercions!inheritance graph}
+Coercions form an inheritance graph with classes as nodes.
We call +{\em coercion path} an ordered list of coercions between two nodes of +the graph. A class $C$ is said to be a subclass of $D$ if there is a +coercion path in the graph from $C$ to $D$; we also say that $C$ +inherits from $D$. Our mechanism supports multiple inheritance since a +class may inherit from several classes, contrary to simple inheritance +where a class inherits from at most one class. However there must be +at most one path between two classes. If this is not the case, only +the {\em oldest} one is valid and the others are ignored. So the order +of declaration of coercions is important. + +We extend notations for coercions to coercion paths. For instance +$[f_1;..;f_k]:C \mbox{\texttt{>->}} D$ is the coercion path composed +by the coercions $f_1..f_k$. The application of a coercion path to a +term consists of the successive application of its coercions. + +\asection{Declaration of Coercions} + +%%%%% "Class" is useless, since classes are implicitely defined via coercions. + +% \asubsection{\tt Class {\qualid}.}\comindex{Class} +% Declares {\qualid} as a new class. + +% \begin{ErrMsgs} +% \item {\qualid} \errindex{not declared} +% \item {\qualid} \errindex{is already a class} +% \item \errindex{Type of {\qualid} does not end with a sort} +% \end{ErrMsgs} + +% \begin{Variant} +% \item {\tt Class Local {\qualid}.} \\ +% Declares the construction denoted by {\qualid} as a new local class to +% the current section. +% \end{Variant} + +% END "Class" is useless + +\asubsection{\tt Coercion {\qualid} : {\class$_1$} >-> {\class$_2$}.} +\comindex{Coercion} + +Declares the construction denoted by {\qualid} as a coercion between +{\class$_1$} and {\class$_2$}. + +% Useless information +% The classes {\class$_1$} and {\class$_2$} are first declared if necessary. 
+
+\begin{ErrMsgs}
+\item {\qualid} \errindex{not declared}
+\item {\qualid} \errindex{is already a coercion}
+\item \errindex{Funclass cannot be a source class}
+\item \errindex{Sortclass cannot be a source class}
+\item {\qualid} \errindex{is not a function}
+\item \errindex{Cannot find the source class of {\qualid}}
+\item \errindex{Cannot recognize {\class$_1$} as a source class of {\qualid}}
+\item {\qualid} \errindex{does not respect the inheritance uniform condition}
+\item \errindex{Found target class {\class} instead of {\class$_2$}}
+
+\end{ErrMsgs}
+
+When the coercion {\qualid} is added to the inheritance graph, non
+valid coercion paths are ignored; they are signaled by a warning.
+\\[0.3cm]
+\noindent {\bf Warning :}
+\begin{enumerate}
+\item \begin{tabbing}
+{\tt Ambiguous paths: }\= $[f_1^1;..;f_{n_1}^1] : C_1\mbox{\tt >->}D_1$\\
+ \> ... \\
+ \>$[f_1^m;..;f_{n_m}^m] : C_m\mbox{\tt >->}D_m$
+ \end{tabbing}
+\end{enumerate}
+
+\begin{Variants}
+\item {\tt Coercion Local {\qualid} : {\class$_1$} >-> {\class$_2$}.}
+\comindex{Coercion Local}\\
+ Declares the construction denoted by {\qualid} as a coercion local to
+ the current section.
+
+\item {\tt Coercion {\ident} := {\term}}\comindex{Coercion}\\
+ This defines {\ident} just like \texttt{Definition {\ident} :=
+ {\term}}, and then declares {\ident} as a coercion between its
+ source and its target.
+
+\item {\tt Coercion {\ident} := {\term} : {\type}}\\
+ This defines {\ident} just like
+ \texttt{Definition {\ident} : {\type} := {\term}}, and then
+ declares {\ident} as a coercion between its source and its target.
+
+\item {\tt Coercion Local {\ident} := {\term}}\comindex{Coercion Local}\\
+ This defines {\ident} just like \texttt{Local {\ident} :=
+ {\term}}, and then declares {\ident} as a coercion between its
+ source and its target.
+
+\item Assumptions can be declared as coercions at declaration
+time.
This extends the grammar of declarations from Figure +\ref{sentences-syntax} as follows: +\comindex{Variable \mbox{\rm (and coercions)}} +\comindex{Axiom \mbox{\rm (and coercions)}} +\comindex{Parameter \mbox{\rm (and coercions)}} +\comindex{Hypothesis \mbox{\rm (and coercions)}} + +\begin{tabular}{lcl} +%% Declarations +{\declaration} & ::= & {\declarationkeyword} {\assums} {\tt .} \\ +&&\\ +{\assums} & ::= & {\simpleassums} \\ + & $|$ & \nelist{{\tt (} \simpleassums {\tt )}}{} \\ +&&\\ +{\simpleassums} & ::= & \nelist{\ident}{} {\tt :}\zeroone{{\tt >}} {\term}\\ +\end{tabular} + +If the extra {\tt >} is present before the type of some assumptions, these +assumptions are declared as coercions. + +\item Constructors of inductive types can be declared as coercions at +definition time of the inductive type. This extends and modifies the +grammar of inductive types from Figure \ref{sentences-syntax} as follows: +\comindex{Inductive \mbox{\rm (and coercions)}} +\comindex{CoInductive \mbox{\rm (and coercions)}} + +\begin{center} +\begin{tabular}{lcl} +%% Inductives +{\inductive} & ::= & + {\tt Inductive} \nelist{\inductivebody}{with} {\tt .} \\ + & $|$ & {\tt CoInductive} \nelist{\inductivebody}{with} {\tt .} \\ + & & \\ +{\inductivebody} & ::= & + {\ident} \sequence{\binderlet}{} {\tt :} {\term} {\tt :=} \\ + && ~~~\zeroone{\zeroone{\tt |} \nelist{\constructor}{|}} \\ + & & \\ +{\constructor} & ::= & {\ident} \sequence{\binderlet}{} \zeroone{{\tt :}\zeroone{\tt >} {\term}} \\ +\end{tabular} +\end{center} + +Especially, if the extra {\tt >} is present in a constructor +declaration, this constructor is declared as a coercion. +\end{Variants} + +\asubsection{\tt Identity Coercion {\ident}:{\class$_1$} >-> {\class$_2$}.} +\comindex{Identity Coercion} + +We check that {\class$_1$} is a constant with a value of the form +$fun~ (x_1:T_1)..(x_n:T_n) => (\mbox{\class}_2~t_1..t_m)$ where $m$ is the +number of parameters of \class$_2$. 
Then we define an identity +function with the type +$forall~ (x_1:T_1)..(x_n:T_n)(y:\mbox{\class}_1~x_1..x_n), +{\mbox{\class}_2}~t_1..t_m$, and we declare it as an identity +coercion between {\class$_1$} and {\class$_2$}. + +\begin{ErrMsgs} +\item {\class$_1$} \errindex{must be a transparent constant} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Identity Coercion Local {\ident}:{\ident$_1$} >-> {\ident$_2$}.} \\ +Idem but locally to the current section. + +\item {\tt SubClass {\ident} := {\type}.} \\ +\comindex{SubClass} + If {\type} is a class +{\ident'} applied to some arguments then {\ident} is defined and an +identity coercion of name {\tt Id\_{\ident}\_{\ident'}} is +declared. Otherwise said, this is an abbreviation for + +{\tt Definition {\ident} := {\type}.} + + followed by + +{\tt Identity Coercion Id\_{\ident}\_{\ident'}:{\ident} >-> {\ident'}}. + +\item {\tt Local SubClass {\ident} := {\type}.} \\ +Same as before but locally to the current section. + +\end{Variants} + +\asection{Displaying Available Coercions} + +\asubsection{\tt Print Classes.} +\comindex{Print Classes} +Print the list of declared classes in the current context. + +\asubsection{\tt Print Coercions.} +\comindex{Print Coercions} +Print the list of declared coercions in the current context. + +\asubsection{\tt Print Graph.} +\comindex{Print Graph} +Print the list of valid coercion paths in the current context. + +\asubsection{\tt Print Coercion Paths {\class$_1$} {\class$_2$}.} +\comindex{Print Coercion Paths} +Print the list of valid coercion paths from {\class$_1$} to {\class$_2$}. + +\asection{Activating the Printing of Coercions} + +\asubsection{\tt Set Printing Coercions.} +\comindex{Set Printing Coercions} +\comindex{Unset Printing Coercions} + +This command forces all the coercions to be printed. +Conversely, to skip the printing of coercions, use + {\tt Unset Printing Coercions}. +By default, coercions are not printed. 
+ +\asubsection{\tt Set Printing Coercion {\qualid}.} +\comindex{Set Printing Coercion} +\comindex{Unset Printing Coercion} + +This command forces coercion denoted by {\qualid} to be printed. +To skip the printing of coercion {\qualid}, use + {\tt Unset Printing Coercion {\qualid}}. +By default, a coercion is never printed. + +\asection{Classes as Records} +\label{Coercions-and-records} +\index{Coercions!and records} +We allow the definition of {\em Structures with Inheritance} (or +classes as records) by extending the existing {\tt Record} macro +(see section~\ref{Record}). Its new syntax is: + +\begin{center} +\begin{tabular}{l} +{\tt Record \zeroone{>}~{\ident} {\binderlet} : {\sort} := \zeroone{\ident$_0$} \verb+{+} \\ +~~~~\begin{tabular}{l} + {\tt \ident$_1$ $[$:$|$:>$]$ \term$_1$ ;} \\ + ... \\ + {\tt \ident$_n$ $[$:$|$:>$]$ \term$_n$ \verb+}+. } + \end{tabular} +\end{tabular} +\end{center} +The identifier {\ident} is the name of the defined record and {\sort} +is its type. The identifier {\ident$_0$} is the name of its +constructor. The identifiers {\ident$_1$}, .., {\ident$_n$} are the +names of its fields and {\term$_1$}, .., {\term$_n$} their respective +types. The alternative {\tt $[$:$|$:>$]$} is ``{\tt :}'' or ``{\tt +:>}''. If {\tt {\ident$_i$}:>{\term$_i$}}, then {\ident$_i$} is +automatically declared as coercion from {\ident} to the class of +{\term$_i$}. Remark that {\ident$_i$} always verifies the uniform +inheritance condition. If the optional ``{\tt >}'' before {\ident} is +present, then {\ident$_0$} (or the default name {\tt Build\_{\ident}} +if {\ident$_0$} is omitted) is automatically declared as a coercion +from the class of {\term$_n$} to {\ident} (this may fail if the +uniform inheritance condition is not satisfied). + +\Rem The keyword {\tt Structure}\comindex{Structure} is a synonym of {\tt +Record}. 
+ +\asection{Coercions and Sections} +\index{Coercions!and sections} + The inheritance mechanism is compatible with the section +mechanism. The global classes and coercions defined inside a section +are redefined after its closing, using their new value and new +type. The classes and coercions which are local to the section are +simply forgotten. +Coercions with a local source class or a local target class, and +coercions which do not verify the uniform inheritance condition any longer +are also forgotten. + +\asection{Examples} + + There are three situations: + +\begin{itemize} +\item $f~a$ is ill-typed where $f:forall~x:A,B$ and $a:A'$. If there is a + coercion path between $A'$ and $A$, $f~a$ is transformed into + $f~a'$ where $a'$ is the result of the application of this + coercion path to $a$. + +We first give an example of coercion between atomic inductive types + +%\begin{\small} +\begin{coq_example} +Definition bool_in_nat (b:bool) := if b then 0 else 1. +Coercion bool_in_nat : bool >-> nat. +Check (0 = true). +Set Printing Coercions. +Check (0 = true). +\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + +\Warning ``\verb|Check true=O.|'' fails. This is ``normal'' behaviour of +coercions. To validate \verb|true=O|, the coercion is searched from +\verb=nat= to \verb=bool=. There is none. + +We give an example of coercion between classes with parameters. + +%\begin{\small} +\begin{coq_example} +Parameters + (C : nat -> Set) (D : nat -> bool -> Set) (E : bool -> Set). +Parameter f : forall n:nat, C n -> D (S n) true. +Coercion f : C >-> D. +Parameter g : forall (n:nat) (b:bool), D n b -> E b. +Coercion g : D >-> E. +Parameter c : C 0. +Parameter T : E true -> nat. +Check (T c). +Set Printing Coercions. +Check (T c). +\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + +We give now an example using identity coercions. 
+ +%\begin{small} +\begin{coq_example} +Definition D' (b:bool) := D 1 b. +Identity Coercion IdD'D : D' >-> D. +Print IdD'D. +Parameter d' : D' true. +Check (T d'). +Set Printing Coercions. +Check (T d'). +\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + + + In the case of functional arguments, we use the monotonic rule of +sub-typing. Approximatively, to coerce $t:forall~x:A, B$ towards +$forall~x:A',B'$, one have to coerce $A'$ towards $A$ and $B$ towards +$B'$. An example is given below: + +%\begin{small} +\begin{coq_example} +Parameters (A B : Set) (h : A -> B). +Coercion h : A >-> B. +Parameter U : (A -> E true) -> nat. +Parameter t : B -> C 0. +Check (U t). +Set Printing Coercions. +Check (U t). +\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + + Remark the changes in the result following the modification of the +previous example. + +%\begin{small} +\begin{coq_example} +Parameter U' : (C 0 -> B) -> nat. +Parameter t' : E true -> A. +Check (U' t'). +Set Printing Coercions. +Check (U' t'). +\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + +\item An assumption $x:A$ when $A$ is not a type, is ill-typed. It is + replaced by $x:A'$ where $A'$ is the result of the application + to $A$ of the coercion path between the class of $A$ and {\tt + Sortclass} if it exists. This case occurs in the abstraction + $fun~ x:A => t$, universal quantification $forall~x:A, B$, + global variables and parameters of (co-)inductive definitions + and functions. In $forall~x:A, B$, such a coercion path may be + applied to $B$ also if necessary. + +%\begin{small} +\begin{coq_example} +Parameter Graph : Type. +Parameter Node : Graph -> Type. +Coercion Node : Graph >-> Sortclass. +Parameter G : Graph. +Parameter Arrows : G -> G -> Type. +Check Arrows. +Parameter fg : G -> G. +Check fg. +Set Printing Coercions. +Check fg. 
+\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + +\item $f~a$ is ill-typed because $f:A$ is not a function. The term + $f$ is replaced by the term obtained by applying to $f$ the + coercion path between $A$ and {\tt Funclass} if it exists. + +%\begin{small} +\begin{coq_example} +Parameter bij : Set -> Set -> Set. +Parameter ap : forall A B:Set, bij A B -> A -> B. +Coercion ap : bij >-> Funclass. +Parameter b : bij nat nat. +Check (b 0). +Set Printing Coercions. +Check (b 0). +\end{coq_example} +%\end{small} + +\begin{coq_eval} +Unset Printing Coercions. +\end{coq_eval} + +Let us see the resulting graph of this session. + +%\begin{small} +\begin{coq_example} +Print Graph. +\end{coq_example} +%\end{small} + +\end{itemize} + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Extraction.tex b/doc/refman/Extraction.tex new file mode 100644 index 00000000..fcce23f9 --- /dev/null +++ b/doc/refman/Extraction.tex @@ -0,0 +1,664 @@ +\achapter{Extraction of programs in Objective Caml and Haskell} +\label{Extraction} +\aauthor{Jean-Christophe Filliâtre and Pierre Letouzey} +\index{Extraction} + +\begin{flushleft} + \em The status of extraction is experimental. +\end{flushleft} +We present here the \Coq\ extraction commands, used to build certified +and relatively efficient functional programs, extracting them from the +proofs of their specifications. The functional languages available as +output are currently \ocaml{}, \textsc{Haskell} and \textsc{Scheme}. +In the following, ``ML'' will be used (abusively) to refer to any of +the three. + +\paragraph{Differences with old versions.} +The current extraction mechanism is new for version 7.0 of {\Coq}. +In particular, the \FW\ toplevel used as an intermediate step between +\Coq\ and ML has been withdrawn. It is also not possible +any more to import ML objects in this \FW\ toplevel. 
+The current mechanism also differs from
+the one in previous versions of \Coq: there is no longer
+an explicit toplevel for the language (formerly called \textsc{Fml}).
+
+\asection{Generating ML code}
+\comindex{Extraction}
+\comindex{Recursive Extraction}
+\comindex{Extraction Module}
+\comindex{Recursive Extraction Module}
+
+The next two commands are meant to be used for rapid preview of
+extraction. They both display extracted term(s) inside \Coq.
+
+\begin{description}
+\item {\tt Extraction \qualid.} ~\par
+ Extracts one constant or module in the \Coq\ toplevel.
+
+\item {\tt Recursive Extraction \qualid$_1$ \dots\ \qualid$_n$.} ~\par
+ Recursive extraction of all the globals (or modules) \qualid$_1$ \dots\
+ \qualid$_n$ and all their dependencies in the \Coq\ toplevel.
+\end{description}
+
+%% TODO error messages
+
+All the following commands produce real ML files. The user can choose to produce
+one monolithic file or one file per \Coq\ library.
+
+\begin{description}
+\item {\tt Extraction "{\em file}"}
+ \qualid$_1$ \dots\ \qualid$_n$. ~\par
+ Recursive extraction of all the globals (or modules) \qualid$_1$ \dots\
+ \qualid$_n$ and all their dependencies in one monolithic file {\em file}.
+ Global and local identifiers are renamed according to the chosen ML
+ language to fulfill its syntactic conventions, keeping original
+ names as much as possible.
+
+\item {\tt Extraction Library} \ident. ~\par
+ Extraction of the whole \Coq\ library {\tt\ident.v} to an ML module
+ {\tt\ident.ml}. In case of name clash, identifiers are here renamed
+ using prefixes \verb!coq_! or \verb!Coq_! to ensure a
+ session-independent renaming.
+
+\item {\tt Recursive Extraction Library} \ident. ~\par
+ Extraction of the \Coq\ library {\tt\ident.v} and all other modules
+ {\tt\ident.v} depends on.
+\end{description}
+
+The list of globals \qualid$_i$ does not need to be
+exhaustive: it is automatically completed into a complete and minimal
+environment.
+
+\asection{Extraction options}
+
+\asubsection{Setting the target language}
+\comindex{Extraction Language}
+
+The ability to fix the target language is the first and most important
+of the extraction options. Default is Ocaml. Besides Haskell and
+Scheme, another language called Toplevel is provided. It is a pseudo-Ocaml,
+with no renaming on global names: so names are printed as in \Coq.
+This third language is available only at the \Coq\ Toplevel.
+\begin{description}
+\item {\tt Extraction Language Ocaml}.
+\item {\tt Extraction Language Haskell}.
+\item {\tt Extraction Language Scheme}.
+\item {\tt Extraction Language Toplevel}.
+\end{description}
+
+\asubsection{Inlining and optimizations}
+
+Since Objective Caml is a strict language, the extracted
+code has to be optimized in order to be efficient (for instance, when
+using induction principles we do not want to compute all the recursive
+calls but only the needed ones). So the extraction mechanism provides
+an automatic optimization routine that will be
+called each time the user wants to generate Ocaml programs. Essentially,
+it performs constant inlining and reductions. Therefore some
+constants may not appear in the resulting monolithic Ocaml program (a warning is
+printed for each such constant). In the case of modular extraction,
+even if some inlining is done, the inlined constants are nevertheless
+printed, to ensure session-independent programs.
+
+Concerning Haskell, such optimizations are less useful because of
+laziness. We still make some optimizations, for example in order to
+produce more readable code.
+
+All these optimizations are controlled by the following \Coq\ options:
+
+\begin{description}
+
+\item \comindex{Set Extraction Optimize}
+{\tt Set Extraction Optimize.}
+
+\item \comindex{Unset Extraction Optimize}
+{\tt Unset Extraction Optimize.}
+
+Default is Set.
This controls all optimizations made on the ML terms
+(mostly reduction of dummy beta/iota redexes, but also simplifications on
+Cases, etc). Unset this option if you want an ML term as close as
+possible to the Coq term.
+
+\item \comindex{Set Extraction AutoInline}
+{\tt Set Extraction AutoInline.}
+
+\item \comindex{Unset Extraction AutoInline}
+{\tt Unset Extraction AutoInline.}
+
+Default is Set, so by default, the extraction mechanism feels free to
+inline the bodies of some defined constants, according to some heuristics
+like size of bodies, usefulness of some arguments, etc. Those heuristics
+are not always perfect; if you want to disable this feature, do it by Unset.
+
+\item \comindex{Extraction Inline}
+{\tt Extraction Inline} \qualid$_1$ \dots\ \qualid$_n$.
+
+\item \comindex{Extraction NoInline}
+{\tt Extraction NoInline} \qualid$_1$ \dots\ \qualid$_n$.
+
+In addition to the automatic inline feature, you can now tell precisely to
+inline some more constants by the {\tt Extraction Inline} command. Conversely,
+you can forbid the automatic inlining of some specific constants by
+the {\tt Extraction NoInline} command.
+Those two commands enable a precise control of what is inlined and what is not.
+
+\item \comindex{Print Extraction Inline}
+{\tt Print Extraction Inline}.
+
+Prints the current state of the table recording the custom inlinings
+declared by the two previous commands.
+
+\item \comindex{Reset Extraction Inline}
+{\tt Reset Extraction Inline}.
+
+Puts the table recording the custom inlinings back to empty.
+
+\end{description}
+
+
+\paragraph{Inlining and printing of a constant declaration.}
+
+A user can explicitly ask a constant to be extracted by two means:
+\begin{itemize}
+\item by mentioning it on the extraction command line
+\item by extracting the whole \Coq\ module of this constant.
+\end{itemize}
+In both cases, the declaration of this constant will be present in the
+produced file.
+
+But this same constant may or may not be inlined in the following
+terms, depending on the automatic/custom inlining mechanism.
+
+
+For the constants non-explicitly required but needed for dependency
+reasons, there are two cases:
+\begin{itemize}
+\item If an inlining decision is taken, whether automatically or not,
+all occurrences of this constant are replaced by its extracted body, and
+this constant is not declared in the generated file.
+\item If no inlining decision is taken, the constant is normally
+ declared in the produced file.
+\end{itemize}
+
+\asubsection{Realizing axioms}\label{extraction:axioms}
+
+Extraction will fail if it encounters an informative
+axiom not realized (see section \ref{extraction:axioms}).
+A warning will be issued if it encounters a logical axiom, to remind
+the user that inconsistent logical axioms may lead to incorrect or
+non-terminating extracted terms.
+
+It is possible to assume some axioms while developing a proof. Since
+these axioms can be any kind of proposition or object or type, they may
+perfectly well have some computational content. But a program must be
+a closed term, and of course the system cannot guess the program which
+realizes an axiom. Therefore, it is possible to tell the system
+what ML term corresponds to a given axiom.
+
+\comindex{Extract Constant}
+\begin{description}
+\item{\tt Extract Constant \qualid\ => \str.} ~\par
+ Give an ML extraction for the given constant.
+ The \str\ may be an identifier or a quoted string.
+\item{\tt Extract Inlined Constant \qualid\ => \str.} ~\par
+ Same as the previous one, except that the given ML terms will
+ be inlined everywhere instead of being declared via a let.
+\end{description}
+
+Note that the {\tt Extract Inlined Constant} command is sugar
+for an {\tt Extract Constant} followed by a {\tt Extraction Inline}.
+Hence a {\tt Reset Extraction Inline} will have an effect on the
+realized and inlined axiom.
+
+Of course, it is the responsibility of the user to ensure that the ML
+terms given to realize the axioms do have the expected types. In
+fact, the strings containing realizing code are just copied in the
+extracted files. The extraction recognizes whether the realized axiom
+should become an ML type constant or an ML object declaration.
+
+\Example
+\begin{coq_example}
+Axiom X:Set.
+Axiom x:X.
+Extract Constant X => "int".
+Extract Constant x => "0".
+\end{coq_example}
+
+Notice that in the case of a type scheme axiom (i.e. whose type is an
+arity, that is a sequence of products finished by a sort), then some type
+variables have to be given. The syntax is then:
+
+\begin{description}
+\item{\tt Extract Constant \qualid\ \str$_1$ \ldots \str$_n$ => \str.} ~\par
+\end{description}
+
+The number of type variables given is checked by the system.
+
+\Example
+\begin{coq_example}
+Axiom Y : Set -> Set -> Set.
+Extract Constant Y "'a" "'b" => " 'a*'b ".
+\end{coq_example}
+
+Realizing an axiom via {\tt Extract Constant} is only useful in the
+case of an informative axiom (of sort Type or Set). A logical axiom
+has no computational content and hence will not appear in extracted
+terms. But a warning is nonetheless issued if extraction encounters a
+logical axiom. This warning reminds the user that inconsistent logical
+axioms may lead to incorrect or non-terminating extracted terms.
+
+If an informative axiom has not been realized before an extraction, a
+warning is also issued and the definition of the axiom is filled with
+an exception labelled {\tt AXIOM TO BE REALIZED}. The user must then
+search these exceptions inside the extracted file and replace them by
+real code.
+
+\comindex{Extract Inductive}
+
+The system also provides a mechanism to specify ML terms for inductive
+types and constructors. For instance, the user may want to use the ML
+native boolean type instead of the \Coq\ one.
The syntax is the following: + +\begin{description} +\item{\tt Extract Inductive \qualid\ => \str\ [ \str\ \dots \str\ ].} ~\par + Give an ML extraction for the given inductive type. You must specify + extractions for the type itself (first \str) and all its + constructors (between square brackets). The ML extraction must be an + ML recursive datatype. +\end{description} + +\Example +Typical examples are the following: +\begin{coq_example} +Extract Inductive unit => "unit" [ "()" ]. +Extract Inductive bool => "bool" [ "true" "false" ]. +Extract Inductive sumbool => "bool" [ "true" "false" ]. +\end{coq_example} + + +\asection{Differences between \Coq\ and ML type systems} + + +Due to differences between \Coq\ and ML type systems, +some extracted programs are not directly typable in ML. +We now solve this problem (at least in Ocaml) by adding +when needed some unsafe casting {\tt Obj.magic}, which give +a generic type {\tt 'a} to any term. + +For example, Here are two kinds of problem that can occur: + +\begin{itemize} + \item If some part of the program is {\em very} polymorphic, there + may be no ML type for it. In that case the extraction to ML works + all right but the generated code may be refused by the ML + type-checker. A very well known example is the {\em distr-pair} + function: +\begin{verbatim} +Definition dp := + fun (A B:Set)(x:A)(y:B)(f:forall C:Set, C->C) => (f A x, f B y). +\end{verbatim} + +In Ocaml, for instance, the direct extracted term would be: + +\begin{verbatim} +let dp x y f = Pair((f () x),(f () y)) +\end{verbatim} + +and would have type: +\begin{verbatim} +dp : 'a -> 'a -> (unit -> 'a -> 'b) -> ('b,'b) prod +\end{verbatim} + +which is not its original type, but a restriction. + +We now produce the following correct version: +\begin{verbatim} +let dp x y f = Pair ((Obj.magic f () x), (Obj.magic f () y)) +\end{verbatim} + + \item Some definitions of \Coq\ may have no counterpart in ML. 
This + happens when there is a quantification over types inside the type + of a constructor; for example: +\begin{verbatim} +Inductive anything : Set := dummy : forall A:Set, A -> anything. +\end{verbatim} + +which corresponds to the definition of an ML dynamic type. +In Ocaml, we must cast any argument of the constructor dummy. + +\end{itemize} + +Even with those unsafe castings, you should never get error like +``segmentation fault''. In fact even if your program may seem +ill-typed to the Ocaml type-checker, it can't go wrong: it comes +from a Coq well-typed terms, so for example inductives will always +have the correct number of arguments, etc. + +More details about the correctness of the extracted programs can be +found in \cite{Let02}. + +We have to say, though, that in most ``realistic'' programs, these +problems do not occur. For example all the programs of Coq library are +accepted by Caml type-checker without any {\tt Obj.magic} (see examples below). + + + +\asection{Some examples} + +We present here two examples of extractions, taken from the +\Coq\ Standard Library. We choose \ocaml\ as target language, +but all can be done in the other dialects with slight modifications. +We then indicate where to find other examples and tests of Extraction. + +\asubsection{A detailed example: Euclidean division} + +The file {\tt Euclid} contains the proof of Euclidean division +(theorem {\tt eucl\_dev}). The natural numbers defined in the example +files are unary integers defined by two constructors $O$ and $S$: +\begin{coq_example*} +Inductive nat : Set := + | O : nat + | S : nat -> nat. +\end{coq_example*} + +This module contains a theorem {\tt eucl\_dev}, and its extracted term +is of type +\begin{verbatim} +forall b:nat, b > 0 -> forall a:nat, diveucl a b +\end{verbatim} +where {\tt diveucl} is a type for the pair of the quotient and the modulo. +We can now extract this program to \ocaml: + +\begin{coq_eval} +Reset Initial. 
+\end{coq_eval} +\begin{coq_example} +Require Import Euclid. +Extraction Inline Wf_nat.gt_wf_rec Wf_nat.lt_wf_rec. +Recursive Extraction eucl_dev. +\end{coq_example} + +The inlining of {\tt gt\_wf\_rec} and {\tt lt\_wf\_rec} is not +mandatory. It only enhances readability of extracted code. +You can then copy-paste the output to a file {\tt euclid.ml} or let +\Coq\ do it for you with the following command: + +\begin{coq_example} +Extraction "euclid" eucl_dev. +\end{coq_example} + +Let us play with the resulting program: + +\begin{verbatim} +# #use "euclid.ml";; +type sumbool = Left | Right +type nat = O | S of nat +type diveucl = Divex of nat * nat +val minus : nat -> nat -> nat = +val le_lt_dec : nat -> nat -> sumbool = +val le_gt_dec : nat -> nat -> sumbool = +val eucl_dev : nat -> nat -> diveucl = +# eucl_dev (S (S O)) (S (S (S (S (S O)))));; +- : diveucl = Divex (S (S O), S O) +\end{verbatim} +It is easier to test on \ocaml\ integers: +\begin{verbatim} +# let rec i2n = function 0 -> O | n -> S (i2n (n-1));; +val i2n : int -> nat = +# let rec n2i = function O -> 0 | S p -> 1+(n2i p);; +val n2i : nat -> int = +# let div a b = + let Divex (q,r) = eucl_dev (i2n b) (i2n a) in (n2i q, n2i r);; +div : int -> int -> int * int = +# div 173 15;; +- : int * int = 11, 8 +\end{verbatim} + +\asubsection{Another detailed example: Heapsort} + +The file {\tt Heap.v} +contains the proof of an efficient list sorting algorithm described by +Bjerner. It is an adaptation of the well-known {\em heapsort} +algorithm to functional languages. The main function is {\tt +treesort}, whose type is shown below: + + +\begin{coq_eval} +Reset Initial. +Require Import Relation_Definitions. +Require Import List. +Require Import Sorting. +Require Import Permutation. +\end{coq_eval} +\begin{coq_example} +Require Import Heap. +Check treesort. +\end{coq_example} + +Let's now extract this function: + +\begin{coq_example} +Extraction Inline sort_rec is_heap_rec. +Extraction NoInline list_to_heap.
+Extraction "heapsort" treesort. +\end{coq_example} + +One more time, the {\tt Extraction Inline} and {\tt NoInline} +directives are cosmetic. Without it, everything goes right, +but the output is less readable. +Here is the produced file {\tt heapsort.ml}: + +\begin{verbatim} +type nat = + | O + | S of nat + +type 'a sig2 = + 'a + (* singleton inductive, whose constructor was exist2 *) + +type sumbool = + | Left + | Right + +type 'a list = + | Nil + | Cons of 'a * 'a list + +type 'a multiset = + 'a -> nat + (* singleton inductive, whose constructor was Bag *) + +type 'a merge_lem = + 'a list + (* singleton inductive, whose constructor was merge_exist *) + +(** val merge : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> sumbool) -> + 'a1 list -> 'a1 list -> 'a1 merge_lem **) + +let rec merge leA_dec eqA_dec l1 l2 = + match l1 with + | Nil -> l2 + | Cons (a, l) -> + let rec f = function + | Nil -> Cons (a, l) + | Cons (a0, l3) -> + (match leA_dec a a0 with + | Left -> Cons (a, + (merge leA_dec eqA_dec l (Cons (a0, l3)))) + | Right -> Cons (a0, (f l3))) + in f l2 + +type 'a tree = + | Tree_Leaf + | Tree_Node of 'a * 'a tree * 'a tree + +type 'a insert_spec = + 'a tree + (* singleton inductive, whose constructor was insert_exist *) + +(** val insert : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> sumbool) -> + 'a1 tree -> 'a1 -> 'a1 insert_spec **) + +let rec insert leA_dec eqA_dec t a = + match t with + | Tree_Leaf -> Tree_Node (a, Tree_Leaf, Tree_Leaf) + | Tree_Node (a0, t0, t1) -> + let h3 = fun x -> insert leA_dec eqA_dec t0 x in + (match leA_dec a0 a with + | Left -> Tree_Node (a0, t1, (h3 a)) + | Right -> Tree_Node (a, t1, (h3 a0))) + +type 'a build_heap = + 'a tree + (* singleton inductive, whose constructor was heap_exist *) + +(** val list_to_heap : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> + sumbool) -> 'a1 list -> 'a1 build_heap **) + +let rec list_to_heap leA_dec eqA_dec = function + | Nil -> Tree_Leaf + | Cons (a, l0) -> + insert leA_dec eqA_dec (list_to_heap 
leA_dec eqA_dec l0) a + +type 'a flat_spec = + 'a list + (* singleton inductive, whose constructor was flat_exist *) + +(** val heap_to_list : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> + sumbool) -> 'a1 tree -> 'a1 flat_spec **) + +let rec heap_to_list leA_dec eqA_dec = function + | Tree_Leaf -> Nil + | Tree_Node (a, t0, t1) -> Cons (a, + (merge leA_dec eqA_dec (heap_to_list leA_dec eqA_dec t0) + (heap_to_list leA_dec eqA_dec t1))) + +(** val treesort : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> sumbool) + -> 'a1 list -> 'a1 list sig2 **) + +let treesort leA_dec eqA_dec l = + heap_to_list leA_dec eqA_dec (list_to_heap leA_dec eqA_dec l) + +\end{verbatim} + +Let's test it: +% Format.set_margin 72;; +\begin{verbatim} +# #use "heapsort.ml";; +type sumbool = Left | Right +type nat = O | S of nat +type 'a tree = Tree_Leaf | Tree_Node of 'a * 'a tree * 'a tree +type 'a list = Nil | Cons of 'a * 'a list +val merge : + ('a -> 'a -> sumbool) -> 'b -> 'a list -> 'a list -> 'a list = +val heap_to_list : + ('a -> 'a -> sumbool) -> 'b -> 'a tree -> 'a list = +val insert : + ('a -> 'a -> sumbool) -> 'b -> 'a tree -> 'a -> 'a tree = +val list_to_heap : + ('a -> 'a -> sumbool) -> 'b -> 'a list -> 'a tree = +val treesort : + ('a -> 'a -> sumbool) -> 'b -> 'a list -> 'a list = +\end{verbatim} + +One can remark that the argument of {\tt treesort} corresponding to +{\tt eqAdec} is never used in the informative part of the terms, +only in the logical parts. So the extracted {\tt treesort} never use +it, hence this {\tt 'b} argument. We will use {\tt ()} for this +argument. Only remains the {\tt leAdec} +argument (of type {\tt 'a -> 'a -> sumbool}) to really provide. 
+ +\begin{verbatim} +# let leAdec x y = if x <= y then Left else Right;; +val leAdec : 'a -> 'a -> sumbool = +# let rec listn = function 0 -> Nil + | n -> Cons(Random.int 10000,listn (n-1));; +val listn : int -> int list = +# treesort leAdec () (listn 9);; +- : int list = Cons (160, Cons (883, Cons (1874, Cons (3275, Cons + (5392, Cons (7320, Cons (8512, Cons (9632, Cons (9876, Nil))))))))) +\end{verbatim} + +Some tests on longer lists (10000 elements) show that the program is +quite efficient for Caml code. + + +\asubsection{The Standard Library} + +As a test, we propose an automatic extraction of the +Standard Library of \Coq. In particular, we will find back the +two previous examples, {\tt Euclid} and {\tt Heapsort}. +Go to directory {\tt contrib/extraction/test} of the sources of \Coq, +and run commands: +\begin{verbatim} +make tree; make +\end{verbatim} +This will extract all Standard Library files and compile them. +It is done via many {\tt Extraction Module}, with some customization +(see subdirectory {\tt custom}). + +%The result of this extraction of the Standard Library can be browsed +%at URL +%\begin{flushleft} +%\url{http://www.lri.fr/~letouzey/extraction}. +%\end{flushleft} + +%Reals theory is normally not extracted, since it is an axiomatic +%development. We propose nonetheless a dummy realization of those +%axioms, to test, run: \\ +% +%\mbox{\tt make reals}\\ + +This test works also with Haskell. In the same directory, run: +\begin{verbatim} +make tree; make -f Makefile.haskell +\end{verbatim} +The haskell compiler currently used is {\tt hbc}. Any other should +also work, just adapt the {\tt Makefile.haskell}. In particular {\tt + ghc} is known to work. + +\asubsection{Extraction's horror museum} + +Some pathological examples of extraction are grouped in the file +\begin{verbatim} +contrib/extraction/test_extraction.v +\end{verbatim} +of the sources of \Coq. 
+ +\asubsection{Users' Contributions} + + Several of the \Coq\ Users' Contributions use extraction to produce + certified programs. In particular the following ones have an automatic + extraction test (just run {\tt make} in those directories): + + \begin{itemize} + \item Bordeaux/Additions + \item Bordeaux/EXCEPTIONS + \item Bordeaux/SearchTrees + \item Dyade/BDDS + \item Lannion + \item Lyon/CIRCUITS + \item Lyon/FIRING-SQUAD + \item Marseille/CIRCUITS + \item Muenchen/Higman + \item Nancy/FOUnify + \item Rocq/ARITH/Chinese + \item Rocq/COC + \item Rocq/GRAPHS + \item Rocq/HIGMAN + \item Sophia-Antipolis/Stalmarck + \item Suresnes/BDD + \end{itemize} + + Lannion, Rocq/HIGMAN and Lyon/CIRCUITS are a bit particular. They are + the only examples of developments where {\tt Obj.magic} are needed. + This is probably due to a heavy use of impredicativity. + After compilation those three examples run nonetheless, + thanks to the correctness of the extraction~\cite{Let02}. + +% $Id: Extraction.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Helm.tex b/doc/refman/Helm.tex new file mode 100644 index 00000000..af34af43 --- /dev/null +++ b/doc/refman/Helm.tex @@ -0,0 +1,317 @@ +\label{Helm} +\index{XML exportation} +\index{Proof rendering} + +This section describes the exportation of {\Coq} theories to XML that +has been contributed by Claudio Sacerdoti Coen. Currently, the main +applications are the rendering and searching tools +developed within the HELM\footnote{Hypertextual Electronic Library of +Mathematics} and MoWGLI\footnote{Mathematics on the Web, Get it by +Logic and Interfaces} projects mainly at the University of Bologna and +partly at INRIA-Sophia Antipolis.
+ +\subsection{Practical use of the XML exportation tool} + +The basic way to export the logical content of a file into XML format +is to use {\tt coqc} with option {\tt -xml}. +When the {\tt -xml} flag is set, every definition or declaration is +immediately exported to XML once concluded. +The system environment variable {\tt COQ\_XML\_LIBRARY\_ROOT} must be +previously set to a directory in which the logical structure of the +exported objects is reflected. + + For {\tt Makefile} files generated by \verb+coq_makefile+ (see section + \ref{Makefile}), it is sufficient to compile the files using + \begin{quotation} + \verb+make COQ_XML=-xml+ + \end{quotation} + (or, equivalently, setting the environment variable \verb+COQ_XML+) + + To export a development to XML, the suggested procedure is then: + + \begin{enumerate} + \item add to your own contribution a valid \verb+Make+ file and use + \verb+coq_makefile+ to generate the \verb+Makefile+ from the \verb+Make+ + file. + + \Warning Since logical names are used to structure the XML + hierarchy, always add to the \verb+Make+ file at least one \verb+"-R"+ + option to map physical file names to logical module paths. + \item set the \verb+COQ_XML_LIBRARY_ROOT+ environment variable to + the directory where the XML file hierarchy must be physically + rooted. + \item compile your contribution with \verb+"make COQ_XML=-xml"+ + \end{enumerate} + +\Rem In case the system variable {\tt COQ\_XML\_LIBRARY\_ROOT} is not set, +the output is done on the standard output. Also, the files are +compressed using {\tt gzip} after creation. This is to save disk space +since the XML format is very verbose. + +\subsection{Reflection of the logical structure into the file system} + +For each {\Coq} logical object, several independent files associated +to this object are created. The structure of the long name of the +object is reflected in the directory structure of the file system. +E.g. 
an object of long name {\tt +{\ident$_1$}.{\ldots}.{\ident$_n$}.{\ident}} is exported to files in the +subdirectory {{\ident$_1$}/{\ldots}/{\ident$_n$}} of the directory +bound to the environment variable {\tt COQ\_XML\_LIBRARY\_ROOT}. + +\subsection{What is exported?} + +The XML exportation tool exports the logical content of {\Coq} +theories. This covers global definitions (including lemmas, theorems, +...), global assumptions (parameters and axioms), local assumptions or +definitions, and inductive definitions. + +Vernacular files are exported to {\tt .theory.xml} files. +%Variables, +%definitions, theorems, axioms and proofs are exported to individual +%files whose suffixes range from {\tt .var.xml}, {\tt .con.xml}, {\tt +%.con.body.xml}, {\tt .con.types.xml} to {\tt .con.proof_tree.xml}. +Comments are pre-processed with {\sf coqdoc} (see section +\ref{coqdoc}). Especially, they have to be enclosed within {\tt (**} +and {\tt *)} to be exported. + +For each inductive definition of name +{\ident$_1$}.{\ldots}.{\ident$_n$}.{\ident}, a file named {\tt +{\ident}.ind.xml} is created in the subdirectory {\tt +{\ident$_1$}/{\ldots}/{\ident$_n$}} of the xml library root +directory. It contains the arities and constructors of the type. For mutual inductive definitions, the file is named after the +name of the first inductive type of the block. + +For each global definition of base name {\tt +{\ident$_1$}.{\ldots}.{\ident$_n$}.{\ident}}, files named +{\tt {\ident}.con.body.xml} and {\tt {\ident}.con.xml} are created in the +subdirectory {\tt {\ident$_1$}/{\ldots}/{\ident$_n$}}. They +respectively contain the body and the type of the definition. + +For each global assumption of base name {\tt +{\ident$_1$}.{\ident$_2$}.{\ldots}.{\ident$_n$}.{\ident}}, a file +named {\tt {\ident}.con.xml} is created in the subdirectory {\tt +{\ident$_1$}/{\ldots}/{\ident$_n$}}. It contains the type of the +global assumption. 
+ +For each local assumption or definition of base name {\ident} located +in sections {\ident$'_1$}, {\ldots}, {\ident$'_p$} of the module {\tt +{\ident$_1$}.{\ident$_2$}.{\ldots}.{\ident$_n$}.{\ident}}, a file +named {\tt {\ident}.var.xml} is created in the subdirectory {\tt +{\ident$_1$}/{\ldots}/{\ident$_n$}/{\ident$'_1$}/\ldots/{\ident$'_p$}}. +It contains its type and, if a definition, its body. + +In order to do proof-rendering (for example in natural language), some +redundant typing information is required, i.e. the type of at least +some of the subterms of the bodies and types of the CIC objects. These +types are called inner types and are exported to files of suffix {\tt +.types.xml} by the exportation tool. + + +% Deactivated +%% \subsection{Proof trees} + +%% For each definition or theorem that has been built with tactics, an +%% extra file of suffix {\tt proof\_tree.xml} is created. It contains the +%% proof scripts and is used for rendering the proof. + +\subsection{Inner types} +\label{inner-types} + +The type of a subterm of a construction is called an {\em inner type} +if it respects the following conditions. + +\begin{enumerate} + \item Its sort is \verb+Prop+\footnote{or {\tt CProp} which is the + "sort"-like definition used in C-CoRN (see + \url{http://vacuumcleaner.cs.kun.nl/c-corn}) to type + computationally relevant predicative propositions.}. + \item It is not a type cast nor an atomic term (variable, constructor or constant). + \item If its root is an abstraction, then the root's parent node is + not an abstraction, i.e. only the type of the outer abstraction of + a block of nested abstractions is printed.
+\end{enumerate} + +The rationale for the 3$^{rd}$ condition is that the type of the inner +abstractions could be easily computed starting from the type of the +outer ones; moreover, the types of the inner abstractions requires a +lot of disk/memory space: removing the 3$^{rd}$ condition leads to XML +file that are two times as big as the ones exported applying the 3$^{rd}$ +condition. + +\subsection{Interactive exportation commands} + +There are also commands to be used interactively in {\tt coqtop}. + +\subsubsection{\tt Print XML {\qualid}} +\comindex{Print XML} + +If the variable {\tt COQ\_XML\_LIBRARY\_ROOT} is set, this command creates +files containing the logical content in XML format of {\qualid}. If +the variable is not set, the result is displayed on the standard +output. + +\begin{Variants} +\item {\tt Print XML File {\str} {\qualid}}\\ +This writes the logical content of {\qualid} in XML format to files +whose prefix is {\str}. +\end{Variants} + +\subsubsection{{\tt Show XML Proof}} +\comindex{Show XML Proof} + +If the variable {\tt COQ\_XML\_LIBRARY\_ROOT} is set, this command creates +files containing the current proof in progress in XML format. It +writes also an XML file made of inner types. If the variable is not +set, the result is displayed on the standard output. + +\begin{Variants} +\item {\tt Show XML File {\str} Proof}\\ This writes the +logical content of {\qualid} in XML format to files whose prefix is +{\str}. +\end{Variants} + +\subsection{Applications: rendering, searching and publishing} + +The HELM team at the University of Bologna has developed tools +exploiting the XML exportation of {\Coq} libraries. This covers +rendering, searching and publishing tools. + +All these tools require a running http server and, if possible, a +MathML compliant browser. The procedure to install the suite of tools +ultimately allowing rendering and searching can be found on the HELM +web site \url{http://helm.cs.unibo.it/library.html}. 
+ +It may be easier though to upload your developments on the HELM http +server and to re-use the infrastructure running on it. This requires +publishing your development. To this aim, follow the instructions on +\url{http://mowgli.cs.unibo.it}. + +Notice that the HELM server already hosts a copy of the standard +library of {\Coq} and of the {\Coq} user contributions. + +\subsection{Technical informations} + +\subsubsection{CIC with Explicit Named Substitutions} + +The exported files are XML encoding of the lambda-terms used by the +\Coq\ system. The implementative details of the \Coq\ system are hidden as much +as possible, so that the XML DTD is a straightforward encoding of the +Calculus of (Co)Inductive Constructions. + +Nevertheless, there is a feature of the \Coq\ system that can not be +hidden in a completely satisfactory way: discharging (see Sect.\ref{Section}). +In \Coq\ it is possible +to open a section, declare variables and use them in the rest of the section +as if they were axiom declarations. Once the section is closed, every definition and theorem in the section is discharged by abstracting it over the section +variables. Variable declarations as well as section declarations are entirely +dropped. Since we are interested in an XML encoding of definitions and +theorems as close as possible to those directly provided the user, we +do not want to export discharged forms. Exporting non-discharged theorem +and definitions together with theorems that rely on the discharged forms +obliges the tools that work on the XML encoding to implement discharging to +achieve logical consistency. Moreover, the rendering of the files can be +misleading, since hyperlinks can be shown between occurrences of the discharge +form of a definition and the non-discharged definition, that are different +objects. + +To overcome the previous limitations, Claudio Sacerdoti Coen developed in his +PhD. 
thesis an extension of CIC, called Calculus of (Co)Inductive Constructions +with Explicit Named Substitutions, that is a slight extension of CIC where +discharging is not necessary. The DTD of the exported XML files describes +constants, inductive types and variables of the Calculus of (Co)Inductive +Constructions with Explicit Named Substitutions. The conversion to the new +calculus is performed during the exportation phase. + +The following example shows a very small \Coq\ development together with its +version in CIC with Explicit Named Substitutions. + +\begin{verbatim} +# CIC version: # +Section S. + Variable A : Prop. + + Definition impl := A -> A. + + Theorem t : impl. (* uses the undischarged form of impl *) + Proof. + exact (fun (a:A) => a). + Qed. + +End S. + +Theorem t' : (impl False). (* uses the discharged form of impl *) + Proof. + exact (t False). (* uses the discharged form of t *) + Qed. +\end{verbatim} + +\begin{verbatim} +# Corresponding CIC with Explicit Named Substitutions version: # +Section S. + Variable A : Prop. + + Definition impl(A) := A -> A. (* theorems and definitions are + explicitly abstracted over the + variables. The name is sufficient to + completely describe the abstraction *) + + Theorem t(A) : impl. (* impl where A is not instantiated *) + Proof. + exact (fun (a:A) => a). + Qed. + +End S. + +Theorem t'() : impl{False/A}. (* impl where A is instantiated with False + Notice that t' does not depend on A *) + Proof. + exact t{False/A}. (* t where A is instantiated with False *) + Qed. +\end{verbatim} + +Further details on the typing and reduction rules of the calculus can be +found in Claudio Sacerdoti Coen PhD. dissertation, where the consistency +of the calculus is also proved. + +\subsubsection{The CIC with Explicit Named Substitutions XML DTD} + +A copy of the DTD can be found in the file ``\verb+cic.dtd+'' in the +\verb+contrib/xml+ source directory of \Coq. 
+The following is a very brief overview of the elements described in the DTD. + +\begin{description} + \item[]\texttt{} + is the root element of the files that correspond to constant types. + \item[]\texttt{} + is the root element of the files that correspond to constant bodies. + It is used only for closed definitions and theorems (i.e. when no + metavariable occurs in the body or type of the constant) + \item[]\texttt{} + is the root element of the file that correspond to the body of a constant + that depends on metavariables (e.g. unfinished proofs) + \item[]\texttt{} + is the root element of the files that correspond to variables + \item[]\texttt{} + is the root element of the files that correspond to blocks + of mutually defined inductive definitions +\end{description} + +The elements + \verb++, \verb++, \verb++, \verb++, \verb++, + \verb++, \verb++, \verb++, \verb++, \verb++, \verb++, \verb++, \verb++, \verb++, + \verb++ and \verb++ are used to encode the constructors of CIC. + The \verb+sort+ or \verb+type+ attribute of the element, if present, is + respectively the sort or the type of the term, that is a sort because of the + typing rules of CIC. + +The element \verb++ correspond to the application of an explicit +named substitution to its first argument, that is a reference to a definition +or declaration in the environment. + +All the other elements are just syntactic sugar. + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Natural.tex b/doc/refman/Natural.tex new file mode 100644 index 00000000..69dfab87 --- /dev/null +++ b/doc/refman/Natural.tex @@ -0,0 +1,425 @@ +\achapter{\texttt{Natural} : proofs in natural language} +\aauthor{Yann Coscoy} + +\asection{Introduction} + +\Natural~ is a package allowing the writing of proofs in natural +language. For instance, the proof in \Coq~of the induction principle on pairs +of natural numbers looks like this: + +\begin{coq_example*} +Require Natural. 
+\end{coq_example*} +\begin{coq_example} +Print nat_double_ind. +\end{coq_example} + +Piping it through the \Natural~pretty-printer gives: + +\comindex{Print Natural} +\begin{coq_example} +Print Natural nat_double_ind. +\end{coq_example} + +\asection{Activating \Natural} + +To enable the printing of proofs in natural language, you should +type under \texttt{coqtop} or \texttt{coqtop -full} the command + +\begin{coq_example*} +Require Natural. +\end{coq_example*} + +By default, proofs are transcripted in english. If you wish to print them +in French, set the French option by + +\comindex{Set Natural} +\begin{coq_example*} +Set Natural French. +\end{coq_example*} + +If you want to go back to English, type in + +\begin{coq_example*} +Set Natural English. +\end{coq_example*} + +Currently, only \verb=French= and \verb=English= are available. + +You may see for example the natural transcription of the proof of +the induction principle on pairs of natural numbers: + +\begin{coq_example*} +Print Natural nat_double_ind. +\end{coq_example*} + +You may also show in natural language the current proof in progress: + +\comindex{Show Natural} +\begin{coq_example} +Goal (n:nat)(le O n). +Induction n. +Show Natural Proof. +\end{coq_example} + +\subsection*{Restrictions} + +For \Natural, a proof is an object of type a proposition (i.e. an +object of type something of type {\tt Prop}). Only proofs are written +in natural language when typing {\tt Print Natural \ident}. All other +objects (the objects of type something which is of type {\tt Set} or +{\tt Type}) are written as usual $\lambda$-terms. + +\asection{Customizing \Natural} + +The transcription of proofs in natural language is mainly a paraphrase of +the formal proofs, but some specific hints in the transcription +can be given. +Three kinds of customization are available. 
+ +\asubsection{Implicit proof steps} + +\subsubsection*{Implicit lemmas} + +Applying a given lemma or theorem \verb=lem1= of statement, say $A +\Rightarrow B$, to an hypothesis, say $H$ (assuming $A$) produces the +following kind of output translation: + +\begin{verbatim} +... +Using lem1 with H we get B. +... +\end{verbatim} + +But sometimes, you may prefer not to see the explicit invocation to +the lemma. You may prefer to see: + +\begin{verbatim} +... +With H we have A. +... +\end{verbatim} + +This is possible by declaring the lemma as implicit. You should type: + +\comindex{Add Natural} +\begin{coq_example*} +Add Natural Implicit lem1. +\end{coq_example*} + +By default, the lemmas \verb=proj1=, \verb=proj2=, \verb=sym_equal= +and \verb=sym_eqT= are declared implicit. To remove a lemma or a theorem +previously declared as implicit, say \verb=lem1=, use the command + +\comindex{Remove Natural} +\begin{coq_example*} +Remove Natural Implicit lem1. +\end{coq_example*} + +To test if the lemma or theorem \verb=lem1= is, or is not, +declared as implicit, type + +\comindex{Test Natural} +\begin{coq_example*} +Test Natural Implicit lem1. +\end{coq_example*} + +\subsubsection*{Implicit proof constructors} + +Let \verb=constr1= be a proof constructor of a given inductive +proposition (or predicate) +\verb=Q= (of type \verb=Prop=). Assume \verb=constr1= proves +\verb=(x:A)(P x)->(Q x)=. Then, applying \verb=constr1= to an hypothesis, +say \verb=H= (assuming \verb=(P a)=) produces the following kind of output: + +\begin{verbatim} +... +By the definition of Q, with H we have (Q a). +... +\end{verbatim} + +But sometimes, you may prefer not to see the explicit invocation to +this constructor. You may prefer to see: + +\begin{verbatim} +... +With H we have (Q a). +... +\end{verbatim} + +This is possible by declaring the constructor as implicit. You should +type, as before: + +\comindex{Add Natural Implicit} +\begin{coq_example*} +Add Natural Implicit constr1. 
+\end{coq_example*} + +By default, the proposition (or predicate) constructors + +\verb=conj=, \verb=or_introl=, \verb=or_intror=, \verb=ex_intro=, +\verb=exT_intro=, \verb=refl_equal=, \verb=refl_eqT= and \verb=exist= + +\noindent are declared implicit. Note that declaring implicit the +constructor of a datatype (i.e. an inductive type of type \verb=Set=) +has no effect. + +As above, you can remove or test a constant declared implicit. + +\subsubsection*{Implicit inductive constants} + +Let \verb=Ind= be an inductive type (either a proposition (or a +predicate) -- on \verb=Prop= --, or a datatype -- on \verb=Set=). +Suppose the proof proceeds by induction on an hypothesis \verb=h= +proving \verb=Ind= (or more generally \verb=(Ind A1 ... An)=). The +following kind of output is produced: + +\begin{verbatim} +... +With H, we will prove A by induction on the definition of Ind. +Case 1. ... +Case 2. ... +... +\end{verbatim} + +But sometimes, you may prefer not to see the explicit invocation to +\verb=Ind=. You may prefer to see: + +\begin{verbatim} +... +We will prove A by induction on H. +Case 1. ... +Case 2. ... +... +\end{verbatim} + +This is possible by declaring the inductive type as implicit. You should +type, as before: + +\comindex{Add Natural Implicit} +\begin{coq_example*} +Add Natural Implicit Ind. +\end{coq_example*} + +This kind of parameterization works for any inductively defined +proposition (or predicate) or datatype. Especially, it works whatever +the definition is recursive or purely by cases. + +By default, the data type \verb=nat= and the inductive connectives +\verb=and=, \verb=or=, \verb=sig=, \verb=False=, \verb=eq=, +\verb=eqT=, \verb=ex= and \verb=exT= are declared implicit. + +As above, you can remove or test a constant declared implicit. Use +{\tt Remove Natural Contractible $id$} or {\tt Test Natural +Contractible $id$}. 
+ +\asubsection{Contractible proof steps} + +\subsubsection*{Contractible lemmas or constructors} + +Some lemmas, theorems or proof constructors of inductive predicates are +often applied in a row and you obtain an output of this kind: + +\begin{verbatim} +... +Using T with H1 and H2 we get P. + * By H3 we have Q. + Using T with theses results we get R. +... +\end{verbatim} + +where \verb=T=, \verb=H1=, \verb=H2= and \verb=H3= prove statements +of the form \verb=(X,Y:Prop)X->Y->(L X Y)=, \verb=A=, \verb=B= and \verb=C= +respectively (and thus \verb=R= is \verb=(L (L A B) C)=). + +You may obtain a condensed output of the form + +\begin{verbatim} +... +Using T with H1, H2, and H3 we get R. +... +\end{verbatim} + +by declaring \verb=T= as contractible: + +\comindex{Add Natural Contractible} +\begin{coq_example*} +Add Natural Contractible T. +\end{coq_example*} + +By default, the lemmas \verb=proj1=, \verb=proj2= and the proof +constructors \verb=conj=, \verb=or_introl=, \verb=or_intror= are +declared contractible. As for implicit notions, you can remove or +test a lemma or constructor declared contractible. + +\subsubsection*{Contractible induction steps} + +Let \verb=Ind= be an inductive type. When the proof proceeds by +induction in a row, you may obtain an output of this kind: + +\begin{verbatim} +... +We have (Ind A (Ind B C)). +We use definition of Ind in a study in two cases. +Case 1: We have A. +Case 2: We have (Ind B C). + We use definition of Ind in a study of two cases. + Case 2.1: We have B. + Case 2.2: We have C. +... +\end{verbatim} + +You may prefer to see + +\begin{verbatim} +... +We have (Ind A (Ind B C)). +We use definition of Ind in a study in three cases. +Case 1: We have A. +Case 2: We have B. +Case 3: We have C. +... +\end{verbatim} + +This is possible by declaring \verb=Ind= as contractible: + +\begin{coq_example*} +Add Natural Contractible T. +\end{coq_example*} + +By default, only \verb=or= is declared as a contractible inductive +constant. 
+As for implicit notions, you can remove or test an inductive notion declared +contractible. + +\asubsection{Transparent definitions} + +``Normal'' definitions are all constructions except proofs and proof constructors. + +\subsubsection*{Transparent non inductive normal definitions} + +When using the definition of a non inductive constant, say \verb=D=, the +following kind of output is produced: + +\begin{verbatim} +... +We have proved C which is equivalent to D. +... +\end{verbatim} + +But you may prefer to hide that D comes from the definition of C as +follows: + +\begin{verbatim} +... +We have proved D. +... +\end{verbatim} + +This is possible by declaring \verb=D= as transparent: + +\comindex{Add Natural Transparent} +\begin{coq_example*} +Add Natural Transparent D. +\end{coq_example*} + +By default, only \verb=not= (normally written \verb=~=) is declared as +a non inductive transparent definition. +As for implicit and contractible definitions, you can remove or test a +non inductive definition declared transparent. +Use \texttt{Remove Natural Transparent} \ident or +\texttt{Test Natural Transparent} \ident. + +\subsubsection*{Transparent inductive definitions} + +Let \verb=Ind= be an inductive proposition (more generally: a +predicate \verb=(Ind x1 ... xn)=). Suppose the definition of +\verb=Ind= is non recursive and built with just +one constructor proving something like \verb=A -> B -> Ind=. +When coming back to the definition of \verb=Ind= the +following kind of output is produced: + +\begin{verbatim} +... +Assume Ind (H). + We use H with definition of Ind. + We have A and B. + ... +\end{verbatim} + +When \verb=H= is not used a second time in the proof, you may prefer +to hide that \verb=A= and \verb=B= come from the definition of +\verb=Ind=. You may prefer to get directly: + +\begin{verbatim} +... +Assume A and B. +... +\end{verbatim} + +This is possible by declaring \verb=Ind= as transparent: + +\begin{coq_example*} +Add Natural Transparent Ind.
+\end{coq_example*} + +By default, \verb=and=, \verb=or=, \verb=ex=, \verb=exT=, \verb=sig= +are declared as inductive transparent constants. As for implicit and +contractible constants, you can remove or test an inductive +constant declared transparent. + +\asubsection{Extending the maximal depth of nested text} + +The depth of nested text is limited. To know the current depth, do: + +\comindex{Set Natural Depth} +\begin{coq_example} +Set Natural Depth. +\end{coq_example} + +To change the maximal depth of nested text (for instance to 125) do: + +\begin{coq_example} +Set Natural Depth 125. +\end{coq_example} + +\asubsection{Restoring the default parameterization} + +The command \verb=Set Natural Default= sets back the parameterization tables of +\Natural~ to their default values, as listed in the above sections. +Moreover, the language is set back to English and the max depth of +nested text is set back to its initial value. + +\asubsection{Printing the current parameterization} + +The commands {\tt Print Natural Implicit}, {\tt Print Natural +Contractible} and {\tt Print \\ Natural Transparent} print the list of +constructions declared {\tt Implicit}, {\tt Contractible}, +{\tt Transparent} respectively. + +\asubsection{Interferences with \texttt{Reset}} + +The customization of \texttt{Natural} is dependent on the \texttt{Reset} +command. If you reset the environment back to a point preceding an +\verb=Add Natural ...= command, the effect of the command will be +erased. Similarly, a reset back to a point before a +\verb=Remove Natural ... = command invalidates the removal. + +\asection{Error messages} + +An error occurs when trying to \verb=Print=, to \verb=Add=, to +\verb=Test=, or to \verb=Remove= an undefined ident. Similarly, an +error occurs when trying to set a language unknown to \Natural.
+Errors may also occur when trying to parameterize the printing of +proofs: some parameterizations are effectively forbidden. +Note that to \verb=Remove= an ident absent from a table or to +\verb=Add= to a table an already present ident does not lead to an +error. + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Omega.tex b/doc/refman/Omega.tex new file mode 100644 index 00000000..bbf17f63 --- /dev/null +++ b/doc/refman/Omega.tex @@ -0,0 +1,226 @@ +\achapter{Omega: a solver of quantifier-free problems in +Presburger Arithmetic} +\aauthor{Pierre Crégut} +\label{OmegaChapter} + +\asection{Description of {\tt omega}} +\tacindex{omega} +\label{description} + +{\tt omega} solves a goal in Presburger arithmetic, i.e. a universally +quantified formula made of equations and inequations. Equations may +be specified either on the type \verb=nat= of natural numbers or on +the type \verb=Z= of binary-encoded integer numbers. Formulas on +\verb=nat= are automatically injected into \verb=Z=. The procedure +may use any hypothesis of the current proof session to solve the goal. + +Multiplication is handled by {\tt omega} but only goals where at +least one of the two multiplicands of products is a constant are +solvable. This is the restriction meant by ``Presburger arithmetic''. + +If the tactic cannot solve the goal, it fails with an error message. +In any case, the computation eventually stops. + +\asubsection{Arithmetical goals recognized by {\tt omega}} + +{\tt omega} applies only to quantifier-free formulas built from the +connectors + +\begin{quote} +\verb=/\, \/, ~, ->= +\end{quote} + +on atomic formulas. Atomic formulas are built from the predicates + +\begin{quote} +\verb!=, le, lt, gt, ge! +\end{quote} + + on \verb=nat= or from the predicates + +\begin{quote} +\verb!=, <, <=, >, >=! +\end{quote} + + on \verb=Z=.
In expressions of type \verb=nat=, {\tt omega} recognizes + +\begin{quote} +\verb!plus, minus, mult, pred, S, O! +\end{quote} + +and in expressions of type \verb=Z=, {\tt omega} recognizes + +\begin{quote} +\verb!+, -, *, Zsucc!, and constants. +\end{quote} + +All expressions of type \verb=nat= or \verb=Z= not built on these +operators are considered abstractly as if they +were arbitrary variables of type \verb=nat= or \verb=Z=. + +\asubsection{Messages from {\tt omega}} +\label{errors} + +When {\tt omega} does not solve the goal, one of the following errors +is generated: + +\begin{ErrMsgs} + +\item \errindex{omega can't solve this system} + + This may happen if your goal is not quantifier-free (if it is + universally quantified, try {\tt intros} first; if it contains + existential quantifiers too, {\tt omega} is not strong enough to solve your + goal). This may happen also if your goal contains arithmetical + operators unknown to {\tt omega}. Finally, your goal may be really + wrong! + +\item \errindex{omega: Not a quantifier-free goal} + + If your goal is universally quantified, you should first apply {\tt + intro} as many times as needed. + +\item \errindex{omega: Unrecognized predicate or connective: {\sl ident}} + +\item \errindex{omega: Unrecognized atomic proposition: {\sl prop}} + +\item \errindex{omega: Can't solve a goal with proposition variables} + +\item \errindex{omega: Unrecognized proposition} + +\item \errindex{omega: Can't solve a goal with non-linear products} + +\item \errindex{omega: Can't solve a goal with equality on {\sl type}} + +\end{ErrMsgs} + +%% Ce code est débranché pour l'instant +%% +% \asubsection{Control over the output} +% There are some flags that can be set to get more information on the procedure + +% \begin{itemize} +% \item \verb=Time= to get the time used by the procedure +% \item \verb=System= to visualize the normalized systems.
+% \item \verb=Action= to visualize the actions performed by the OMEGA +% procedure (see \ref{technical}). +% \end{itemize} + +% \comindex{Set omega Time} +% \comindex{UnSet omega Time} +% \comindex{Switch omega Time} +% \comindex{Set omega System} +% \comindex{UnSet omega System} +% \comindex{Switch omega System} +% \comindex{Set omega Action} +% \comindex{UnSet omega Action} +% \comindex{Switch omega Action} + +% Use {\tt Set omega {\rm\sl flag}} to set the flag +% {\rm\sl flag}. Use {\tt Unset omega {\rm\sl flag}} to unset it and +% {\tt Switch omega {\rm\sl flag}} to toggle it. + +\asection{Using {\tt omega}} + +The {\tt omega} tactic does not belong to the core system. It should be +loaded by +\begin{coq_example*} +Require Import Omega. +Open Scope Z_scope. +\end{coq_example*} + +\example{} + +\begin{coq_example} +Goal forall m n:Z, 1 + 2 * m <> 2 * n. +intros; omega. +\end{coq_example} +\begin{coq_eval} +Abort. +\end{coq_eval} + +\example{} + +\begin{coq_example} +Goal forall z:Z, z > 0 -> 2 * z + 1 > z. +intro; omega. +\end{coq_example} + +% Other examples can be found in \verb+$COQLIB/theories/DEMOS/OMEGA+. + +\asection{Technical data} +\label{technical} + +\asubsection{Overview of the tactic} +\begin{itemize} + +\item The goal is negated twice and the first negation is introduced as an + hypothesis. +\item Hypotheses are decomposed into simple equations or inequations. Multiple + goals may result from this phase. +\item Equations and inequations over \verb=nat= are translated over + \verb=Z=, multiple goals may result from the translation of + subtraction. +\item Equations and inequations are normalized. +\item Goals are solved by the {\it OMEGA} decision procedure. +\item The script of the solution is replayed.
+ +\end{itemize} + +\asubsection{Overview of the {\it OMEGA} decision procedure} + +The {\it OMEGA} decision procedure involved in the {\tt omega} tactic uses +a small subset of the decision procedure presented in + +\begin{quote} + "The Omega Test: a fast and practical integer programming +algorithm for dependence analysis", William Pugh, Communication of the +ACM , 1992, p 102-114. +\end{quote} + +Here is an overview, look at the original paper for more information. + +\begin{itemize} + +\item Equations and inequations are normalized by division by the GCD of their + coefficients. +\item Equations are eliminated, using the Banerjee test to get a coefficient + equal to one. +\item Note that each inequation defines a half space in the space of real value + of the variables. + \item Inequations are solved by projecting on the hyperspace + defined by cancelling one of the variable. They are partitioned + according to the sign of the coefficient of the eliminated + variable. Pairs of inequations from different classes define a + new edge in the projection. + \item Redundant inequations are eliminated or merged in new + equations that can be eliminated by the Banerjee test. +\item The last two steps are iterated until a contradiction is reached + (success) or there is no more variable to eliminate (failure). + +\end{itemize} + +It may happen that there is a real solution and no integer one. The last +steps of the Omega procedure (dark shadow) are not implemented, so the +decision procedure is only partial. + +\asection{Bugs} + +\begin{itemize} +\item The simplification procedure is very dumb and this results in + many redundant cases to explore. + +\item Much too slow. + +\item Certainly other bugs! 
You can report them to + +\begin{quote} + \url{Pierre.Cregut@cnet.francetelecom.fr} +\end{quote} + +\end{itemize} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Polynom.tex b/doc/refman/Polynom.tex new file mode 100644 index 00000000..70889c9d --- /dev/null +++ b/doc/refman/Polynom.tex @@ -0,0 +1,504 @@ +\achapter{The \texttt{ring} tactic} +\aauthor{Patrick Loiseleur and Samuel Boutin} +\label{ring} +\tacindex{ring} + +This chapter presents the \texttt{ring} tactic. + +\asection{What does this tactic do?} + +\texttt{ring} does associative-commutative rewriting in ring and semi-ring +structures. Assume you have two binary functions $\oplus$ and $\otimes$ +that are associative and commutative, with $\otimes$ distributive over +$\oplus$, and two constants 0 and 1 that are unities for $\oplus$ and +$\otimes$. A \textit{polynomial} is an expression built on variables $V_0, V_1, +\dots$ and constants by application of $\oplus$ and $\otimes$. + +Let an {\it ordered product} be a product of variables $V_{i_1} \otimes +\ldots \otimes V_{i_n}$ verifying $i_1 \le i_2 \le \dots \le i_n$. Let a +\textit{monomial} be the product of a constant (possibly equal to 1, in +which case we omit it) and an ordered product. We can order the +monomials by the lexicographic order on products of variables. Let a +\textit{canonical sum} be an ordered sum of monomials that are all +different, i.e. each monomial in the sum is strictly less than the +following monomial according to the lexicographic order. It is an easy +theorem to show that every polynomial is equivalent (modulo the ring +properties) to exactly one canonical sum. This canonical sum is called +the \textit{normal form} of the polynomial. So what does \texttt{ring} do? It +normalizes polynomials over any ring or semi-ring structure.
The basic +use of \texttt{ring} is to simplify ring expressions, so that the user +does not have to deal manually with the theorems of associativity and +commutativity. + +\begin{Examples} +\item In the ring of integers, the normal form of +$x (3 + yx + 25(1 - z)) + zx$ is $28x + (-24)xz + xxy$. +\item For the classical propositional calculus (or the boolean rings) + the normal form is what logicians call \textit{disjunctive normal + form}: every formula is equivalent to a disjunction of + conjunctions of atoms. (Here $\oplus$ is $\vee$, $\otimes$ is + $\wedge$, variables are atoms and the only constants are T and F) +\end{Examples} + +\asection{The variables map} + +It is frequent to have an expression built with + and + $\times$, but rarely on variables only. +Let us associate a number to each subterm of a ring +expression in the \gallina\ language. For example in the ring +\texttt{nat}, consider the expression: + +\begin{quotation} +\begin{verbatim} +(plus (mult (plus (f (5)) x) x) + (mult (if b then (4) else (f (3))) (2))) +\end{verbatim} +\end{quotation} + +\noindent As a ring expression, it has 3 subterms.
Give each subterm a +number in an arbitrary order: + +\begin{tabular}{ccl} +0 & $\mapsto$ & \verb|if b then (4) else (f (3))| \\ +1 & $\mapsto$ & \verb|(f (5))| \\ +2 & $\mapsto$ & \verb|x| \\ +\end{tabular} + +\noindent Then normalize the ``abstract'' polynomial + +$$((V_1 \otimes V_2) \oplus V_2) \oplus (V_0 \otimes 2) $$ + +\noindent In our example the normal form is: + +$$(2 \otimes V_0) \oplus (V_1 \otimes V_2) \oplus (V_2 \otimes V_2)$$ + +\noindent Then substitute the variables by their values in the variables map to +get the concrete normal polynomial: + +\begin{quotation} +\begin{verbatim} +(plus (mult (2) (if b then (4) else (f (3)))) + (plus (mult (f (5)) x) (mult x x))) +\end{verbatim} +\end{quotation} + +\asection{Is it automatic?} + +Yes, building the variables map and doing the substitution after +normalizing is automatically done by the tactic. So you can just forget +this paragraph and use the tactic according to your intuition. + +\asection{Concrete usage in \Coq} + +Under a session launched by \texttt{coqtop} or \texttt{coqtop -full}, +load the \texttt{ring} files with the command: + +\begin{quotation} +\begin{verbatim} +Require Ring. +\end{verbatim} +\end{quotation} + +It does not work under \texttt{coqtop -opt} because the compiled ML +objects used by the tactic are not linked in this binary image, and +dynamic loading of native code is not possible in \ocaml. + +In order to use \texttt{ring} on naturals, load \texttt{ArithRing} +instead; for binary integers, load the module \texttt{ZArithRing}. + +Then, to normalize the terms $term_1$, \dots, $term_n$ in +the current subgoal, use the tactic: + +\begin{quotation} +\texttt{ring} $term_1$ \dots{} $term_n$ +\end{quotation} +\tacindex{ring} + +Then the tactic guesses the type of given terms, the ring theory to +use, the variables map, and replace each term with its normal form. 
+The variables map is common to all terms + +\Warning \texttt{ring $term_1$; ring $term_2$} is not equivalent to +\texttt{ring $term_1$ $term_2$}. In the latter case the variables map +is shared between the two terms, and common subterm $t$ of $term_1$ +and $term_2$ will have the same associated variable number. + +\begin{ErrMsgs} +\item \errindex{All terms must have the same type} +\item \errindex{Don't know what to do with this goal} +\item \errindex{No Declared Ring Theory for \term.} + + \texttt{Use Add [Semi] Ring to declare it} + + That happens when all terms have the same type \term, but there is + no declared ring theory for this set. See below. +\end{ErrMsgs} + +\begin{Variants} +\item \texttt{ring} + + That works if the current goal is an equality between two + polynomials. It will normalize both sides of the + equality, solve it if the normal forms are equal and in other cases + try to simplify the equality using \texttt{congr\_eqT} and \texttt{refl\_equal} + to reduce $x + y = x + z$ to $y = z$ and $x * z = x * y$ to $y = z$. + + \ErrMsg\errindex{This goal is not an equality} + +\end{Variants} + +\asection{Add a ring structure} + +It can be done in the \Coq toplevel (No ML file to edit and to link +with \Coq). First, \texttt{ring} can handle two kinds of structure: +rings and semi-rings. Semi-rings are like rings without an opposite to +addition. Their precise specification (in \gallina) can be found in +the file + +\begin{quotation} +\begin{verbatim} +contrib/ring/Ring_theory.v +\end{verbatim} +\end{quotation} + +The typical example of ring is \texttt{Z}, the typical +example of semi-ring is \texttt{nat}. + +The specification of a +ring is divided in two parts: first the record of constants +($\oplus$, $\otimes$, 1, 0, $\ominus$) and then the theorems +(associativity, commutativity, etc.). + +\begin{small} +\begin{flushleft} +\begin{verbatim} +Section Theory_of_semi_rings. + +Variable A : Type. +Variable Aplus : A -> A -> A. 
+Variable Amult : A -> A -> A. +Variable Aone : A. +Variable Azero : A. +(* There is also a "weakly decidable" equality on A. That means + that if (A_eq x y)=true then x=y but x=y can arise when + (A_eq x y)=false. On an abstract ring the function [x,y:A]false + is a good choice. The proof of A_eq_prop is in this case easy. *) +Variable Aeq : A -> A -> bool. + +Record Semi_Ring_Theory : Prop := +{ SR_plus_sym : (n,m:A)[| n + m == m + n |]; + SR_plus_assoc : (n,m,p:A)[| n + (m + p) == (n + m) + p |]; + + SR_mult_sym : (n,m:A)[| n*m == m*n |]; + SR_mult_assoc : (n,m,p:A)[| n*(m*p) == (n*m)*p |]; + SR_plus_zero_left :(n:A)[| 0 + n == n|]; + SR_mult_one_left : (n:A)[| 1*n == n |]; + SR_mult_zero_left : (n:A)[| 0*n == 0 |]; + SR_distr_left : (n,m,p:A) [| (n + m)*p == n*p + m*p |]; + SR_plus_reg_left : (n,m,p:A)[| n + m == n + p |] -> m==p; + SR_eq_prop : (x,y:A) (Is_true (Aeq x y)) -> x==y +}. +\end{verbatim} +\end{flushleft} +\end{small} + +\begin{small} +\begin{flushleft} +\begin{verbatim} +Section Theory_of_rings. + +Variable A : Type. + +Variable Aplus : A -> A -> A. +Variable Amult : A -> A -> A. +Variable Aone : A. +Variable Azero : A. +Variable Aopp : A -> A. +Variable Aeq : A -> A -> bool. + + +Record Ring_Theory : Prop := +{ Th_plus_sym : (n,m:A)[| n + m == m + n |]; + Th_plus_assoc : (n,m,p:A)[| n + (m + p) == (n + m) + p |]; + Th_mult_sym : (n,m:A)[| n*m == m*n |]; + Th_mult_assoc : (n,m,p:A)[| n*(m*p) == (n*m)*p |]; + Th_plus_zero_left :(n:A)[| 0 + n == n|]; + Th_mult_one_left : (n:A)[| 1*n == n |]; + Th_opp_def : (n:A) [| n + (-n) == 0 |]; + Th_distr_left : (n,m,p:A) [| (n + m)*p == n*p + m*p |]; + Th_eq_prop : (x,y:A) (Is_true (Aeq x y)) -> x==y +}. +\end{verbatim} +\end{flushleft} +\end{small} + +To define a ring structure on A, you must provide an addition, a +multiplication, an opposite function and two unities 0 and 1. 
+ +You must then prove all theorems that make +(A,Aplus,Amult,Aone,Azero,Aeq) +a ring structure, and pack them with the \verb|Build_Ring_Theory| +constructor. + +Finally to register a ring the syntax is: + +\comindex{Add Ring} +\begin{quotation} + \texttt{Add Ring} \textit{A Aplus Amult Aone Azero Ainv Aeq T} + \texttt{[} \textit{c1 \dots cn} \texttt{].} +\end{quotation} + +\noindent where \textit{A} is a term of type \texttt{Set}, +\textit{Aplus} is a term of type \texttt{A->A->A}, +\textit{Amult} is a term of type \texttt{A->A->A}, +\textit{Aone} is a term of type \texttt{A}, +\textit{Azero} is a term of type \texttt{A}, +\textit{Ainv} is a term of type \texttt{A->A}, +\textit{Aeq} is a term of type \texttt{A->bool}, +\textit{T} is a term of type +\texttt{(Ring\_Theory }\textit{A Aplus Amult Aone Azero Ainv + Aeq}\texttt{)}. +The arguments \textit{c1 \dots cn}, +are the names of constructors which define closed terms: a +subterm will be considered as a constant if it is either one of the +terms \textit{c1 \dots cn} or the application of one of these terms to +closed terms. For \texttt{nat}, the given constructors are \texttt{S} +and \texttt{O}, and the closed terms are \texttt{O}, \texttt{(S O)}, +\texttt{(S (S O))}, \ldots + +\begin{Variants} +\item \texttt{Add Semi Ring} \textit{A Aplus Amult Aone Azero Aeq T} + \texttt{[} \textit{c1 \dots\ cn} \texttt{].}\comindex{Add Semi + Ring} + + There are two differences with the \texttt{Add Ring} command: there + is no inverse function and the term $T$ must be of type + \texttt{(Semi\_Ring\_Theory }\textit{A Aplus Amult Aone Azero + Aeq}\texttt{)}. + +\item \texttt{Add Abstract Ring} \textit{A Aplus Amult Aone Azero Ainv + Aeq T}\texttt{.}\comindex{Add Abstract Ring} + + This command should be used for when the operations of rings are not + computable; for example the real numbers of + \texttt{theories/REALS/}. 
Here $0+1$ is not beta-reduced to $1$ but + you still may want to \textit{rewrite} it to $1$ using the ring + axioms. The argument \texttt{Aeq} is not used; a good choice for + that function is \verb+[x:A]false+. + +\item \texttt{Add Abstract Semi Ring} \textit{A Aplus Amult Aone Azero + Aeq T}\texttt{.}\comindex{Add Abstract Semi Ring} + +\end{Variants} + +\begin{ErrMsgs} +\item \errindex{Not a valid (semi)ring theory}. + + That happens when the typing condition does not hold. +\end{ErrMsgs} + +Currently, the hypothesis is made that no more than one ring structure +may be declared for a given type in \texttt{Set} or \texttt{Type}. +This allows automatic detection of the theory used to achieve the +normalization. On popular demand, we can change that and allow several +ring structures on the same set. + +The table of ring theories is compatible with the \Coq\ +sectioning mechanism. If you declare a ring inside a section, the +declaration will be thrown away when closing the section. +And when you load a compiled file, all the \texttt{Add Ring} +commands of this file that are not inside a section will be loaded. + +The typical example of ring is \texttt{Z}, and the typical example of +semi-ring is \texttt{nat}. Another ring structure is defined on the +booleans. + +\Warning Only the ring of booleans is loaded by default with the +\texttt{Ring} module. To load the ring structure for \texttt{nat}, +load the module \texttt{ArithRing}, and for \texttt{Z}, +load the module \texttt{ZArithRing}. + + +\asection{How does it work?} + +The code of \texttt{ring} is a good example of a tactic written using +\textit{reflection} (or \textit{internalization}; the two are synonymous). +What is reflection? Basically, it is writing \Coq{} tactics in \Coq, +rather than in \ocaml. From the philosophical point of view, it is +using the ability of the Calculus of Constructions to speak and reason +about itself.
For the \texttt{ring} tactic we used \Coq\ as a +programming language and also as a proof environment to build a tactic +and to prove it correctness. + +The interested reader is strongly advised to have a look at the file +\texttt{Ring\_normalize.v}. Here a type for polynomials is defined: + +\begin{small} +\begin{flushleft} +\begin{verbatim} +Inductive Type polynomial := + Pvar : idx -> polynomial +| Pconst : A -> polynomial +| Pplus : polynomial -> polynomial -> polynomial +| Pmult : polynomial -> polynomial -> polynomial +| Popp : polynomial -> polynomial. +\end{verbatim} +\end{flushleft} +\end{small} + +There is also a type to represent variables maps, and an +interpretation function, that maps a variables map and a polynomial to an +element of the concrete ring: + +\begin{small} +\begin{flushleft} +\begin{verbatim} +Definition polynomial_simplify := [...] +Definition interp : (varmap A) -> (polynomial A) -> A := [...] +\end{verbatim} +\end{flushleft} +\end{small} + +A function to normalize polynomials is defined, and the big theorem is +its correctness w.r.t interpretation, that is: + +\begin{small} +\begin{flushleft} +\begin{verbatim} +Theorem polynomial_simplify_correct : forall (v:(varmap A))(p:polynomial) + (interp v (polynomial_simplify p)) + =(interp v p). +\end{verbatim} +\end{flushleft} +\end{small} + +(The actual code is slightly more complex: for efficiency, +there is a special datatype to represent normalized polynomials, +i.e. ``canonical sums''. But the idea is still the same). + +So now, what is the scheme for a normalization proof? Let \texttt{p} +be the polynomial expression that the user wants to normalize. First a +little piece of ML code guesses the type of \texttt{p}, the ring +theory \texttt{T} to use, an abstract polynomial \texttt{ap} and a +variables map \texttt{v} such that \texttt{p} is +$\beta\delta\iota$-equivalent to \verb|(interp v ap)|. 
Then we +replace it by \verb|(interp v (polynomial_simplify ap))|, using the +main correctness theorem and we reduce it to a concrete expression +\texttt{p'}, which is the concrete normal form of +\texttt{p}. This is summarized in this diagram: +\begin{center} +\begin{tabular}{rcl} +\texttt{p} & $\rightarrow_{\beta\delta\iota}$ + & \texttt{(interp v ap)} \\ + & & $=_{\mathrm{(by\ the\ main\ correctness\ theorem)}}$ \\ +\texttt{p'} + & $\leftarrow_{\beta\delta\iota}$ + & \texttt{(interp v (polynomial\_simplify ap))} +\end{tabular} +\end{center} +The user does not see the right part of the diagram. +From outside, the tactic behaves like a +$\beta\delta\iota$ simplification extended with AC rewriting rules. +Basically, the proof is only the application of the main +correctness theorem to well-chosen arguments. + +\asection{History of \texttt{ring}} + +First Samuel Boutin designed the tactic \texttt{ACDSimpl}. +This tactic did a lot of rewriting. But the proof +terms generated by rewriting were too big for \Coq's type-checker. +Let us see why: + +\begin{coq_eval} +Require Import ZArith. +Open Scope Z_scope. +\end{coq_eval} +\begin{coq_example} +Goal forall x y z:Z, x + 3 + y + y * z = x + 3 + y + z * y. +\end{coq_example} +\begin{coq_example*} +intros; rewrite (Zmult_comm y z); reflexivity. +Save toto. +\end{coq_example*} +\begin{coq_example} +Print toto. +\end{coq_example} + +At each step of rewriting, the whole context is duplicated in the proof +term. Then, a tactic that does hundreds of rewriting steps generates huge proof +terms. Since \texttt{ACDSimpl} was too slow, Samuel Boutin rewrote it +using reflection (see his article in TACS'97 \cite{Bou97}). Later, the +stuff was rewritten by Patrick +Loiseleur: the new tactic no longer requires \texttt{ACDSimpl} +to compile and it makes use of $\beta\delta\iota$-reduction +not only to replace the rewriting steps, but also to achieve the +interleaving of computation and +reasoning (see \ref{DiscussReflection}).
He also wrote +some ML code for the \texttt{Add Ring} command, which allows registering +new rings dynamically. + +Proof terms generated by \texttt{ring} are quite small: they are +linear in the number of $\oplus$ and $\otimes$ operations in the +normalized terms. Type-checking those terms requires some time because it +makes a large use of the conversion rule, but +memory requirements are much smaller. + +\asection{Discussion} +\label{DiscussReflection} + +Efficiency is not the only motivation to use reflection +here. \texttt{ring} also deals with constants, it rewrites for example the +expression $34 + 2*x -x + 12$ to the expected result $x + 46$. For the +tactic \texttt{ACDSimpl}, the only constants were 0 and 1. So the +expression $34 + 2*(x - 1) + 12$ is interpreted as +$V_0 \oplus V_1 \otimes (V_2 \ominus 1) \oplus V_3$, +with the variables mapping +$\{V_0 \mt 34; V_1 \mt 2; V_2 \mt x; V_3 \mt 12 \}$. Then it is +rewritten to $34 - x + 2*x + 12$, very far from the expected +result. Here rewriting is not sufficient: you have to do some kind of +reduction (some kind of \textit{computation}) to achieve the +normalization. + +The tactic \texttt{ring} is not only faster than a classical one: +using reflection, we get for free integration of computation and +reasoning that would be very complex to implement in the classic fashion. + +Is it the ultimate way to write tactics? +The answer is: yes and no. The \texttt{ring} tactic +uses intensively the conversion +rule of \CIC, that is, it replaces proofs by computation as much as +possible. It can be useful in all situations where a classical tactic +generates huge proof terms. Symbolic processing and tautologies are +such cases. But there are also tactics like \texttt{Auto} or +\texttt{Linear} that do many complex computations, using side-effects +and backtracking, and generate + a small proof term. Clearly, it would be nonsense to +replace them by tactics using reflection.
+ +Another argument against reflection is that \Coq, as a +programming language, has many nice features, like dependent types, +but is very far from the +speed and the expressive power of \ocaml. Wait a minute! With \Coq\ +it is possible to extract ML code from \CIC\ terms, right? So, why not +link the extracted code with \Coq\ to inherit the benefits of +reflection and the speed of ML tactics? That is called \textit{total + reflection}, and is still an active research subject. With these +technologies it will become possible to bootstrap the type-checker of +\CIC, but there is still some work to achieve that goal. + +Another brilliant idea from Benjamin Werner: reflection could be used +to couple an external tool (a rewriting program or a model checker) +with \Coq. We define (in \Coq) a type of terms, a type of +\emph{traces}, and prove a correction theorem that states that +\emph{replaying traces} is safe w.r.t some interpretation. Then we let +the external tool do every computation (using side-effects, +backtracking, exceptions, or other features that are not available in +pure lambda calculus) to produce the trace: now we replay the trace in +\Coq{}, and apply the correction lemma. So internalization seems to be +the best way to import \dots{} external proofs! + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/Program.tex b/doc/refman/Program.tex new file mode 100644 index 00000000..ed1e6e63 --- /dev/null +++ b/doc/refman/Program.tex @@ -0,0 +1,491 @@ +\def\Program{\textsc{Program}} +\def\Russel{\textsc{Russel}} + +\achapter{The \Program{} tactic} +\label{Program} +\aauthor{Matthieu Sozeau} +\index{Program} + +\begin{flushleft} + \em The status of \Program is experimental. +\end{flushleft} + +We present here the \Coq\ \Program tactic commands, used to build certified +\Coq programs, elaborating them from their algorithmic skeleton and a +rich specification.
It can be thought of as a dual of extraction \ref{Extraction}. +The languages available as input are currently restricted to \Coq's term +language, but may be extended to \ocaml{}, \textsc{Haskell} and others +in the future. Input terms and types are typed in an extended system (\Russel) and +interpreted into \Coq\ terms. The interpretation process may produce +some proof obligations which need to be resolved to create the final term. + +\asection{Elaborating programs} +\comindex{Program Fixpoint} + +The next two commands are similar to their standard counterparts +\ref{Simpl-definitions} and \ref{Fixpoint} in that +they define constants. However, they may require the user to prove some +goals to construct the final definitions. + +\section{\tt Program Definition {\ident} := {\term}. + \comindex{Program Definition}\label{ProgramDefinition}} + +This command types the value {\term} in \Russel and generates subgoals +corresponding to proof obligations. Once solved, it binds the final +\Coq\ term to the name {\ident} in the environment. + +\begin{ErrMsgs} +\item \errindex{{\ident} already exists} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Program Definition {\ident} {\tt :}{\term$_1$} := + {\term$_2$}.}\\ + It interprets the type {\term$_1$}, potentially generating proof + obligations to be resolved. Once done with them, we have a \Coq\ type + {\term$_1'$}. It then checks that the type of the interpretation of + {\term$_2$} is coercible to {\term$_1'$}, and registers {\ident} as + being of type {\term$_1'$} once the set of obligations generated + during the interpretation of {\term$_2$} and the aforementioned + coercion derivation are solved.
+\item {\tt Program Definition {\ident} {\binder$_1$}\ldots{\binder$_n$} + {\tt :}\term$_1$ {\tt :=} {\term$_2$}.}\\ + This is equivalent to \\ + {\tt Program Definition\,{\ident}\,{\tt :\,forall}\,% + {\binder$_1$}\ldots{\binder$_n$}{\tt ,}\,\term$_1$\,{\tt :=}}\,% + {\tt fun}\,{\binder$_1$}\ldots{\binder$_n$}\,{\tt =>}\,{\term$_2$}\,% + {\tt .} +\end{Variants} + +\begin{ErrMsgs} +\item \errindex{In environment {\dots} the term: {\term$_2$} does not have type + {\term$_1$}}.\\ + \texttt{Actually, it has type {\term$_3$}}. +\end{ErrMsgs} + +\SeeAlso Sections \ref{Opaque}, \ref{Transparent}, \ref{unfold} + +\section{\tt Program Fixpoint {\ident} {\params} {\tt \{order\}} : type$_0$ := \term$_0$ + \comindex{Program Fixpoint} + \label{ProgramFixpoint}} + +This command allows to define objects using a fixed point +construction. The meaning of this declaration is to define {\it ident} +a recursive function with arguments specified by +{\binder$_1$}\ldots{\binder$_n$} such that {\it ident} applied to +arguments corresponding to these binders has type \type$_0$, and is +equivalent to the expression \term$_0$. The type of the {\ident} is +consequently {\tt forall {\params} {\tt,} \type$_0$} +and the value is equivalent to {\tt fun {\params} {\tt =>} \term$_0$}. + +There are two ways to define fixpoints with \Program{}, structural and +well-founded recursion. + +\subsection{\tt Program Fixpoint {\ident} {\params} {\tt \{struct} + \ident$_0$ {\tt \}} : type$_0$ := \term$_0$ + \comindex{Program Fixpoint Struct} + \label{ProgramFixpointStruct}} + +To be accepted, a structural {\tt Fixpoint} definition has to satisfy some +syntactical constraints on a special argument called the decreasing +argument. They are needed to ensure that the {\tt Fixpoint} definition +always terminates. The point of the {\tt \{struct \ident {\tt \}}} +annotation is to let the user tell the system which argument decreases +along the recursive calls. 
This annotation may be left implicit for +fixpoints with one argument. For instance, one can define the identity +function on naturals as: + +\begin{coq_example} +Program Fixpoint id (n : nat) : { x : nat | x = n } := + match n with + | O => O + | S p => S (id p) + end. +\end{coq_example} + +The {\tt match} operator matches a value (here \verb:n:) with the +various constructors of its (inductive) type. The remaining arguments +give the respective values to be returned, as functions of the +parameters of the corresponding constructor. Thus here when \verb:n: +equals \verb:O: we return \verb:0:, and when \verb:n: equals +\verb:(S p): we return \verb:(S (id p)):. + +The {\tt match} operator is formally described +in detail in Section~\ref{Caseexpr}. The system recognizes that in +the recursive call {\tt (id p)} the argument actually +decreases because it is a {\em pattern variable} coming from {\tt match + n with}. + +Here again, proof obligations may be generated. In our example, we would +have one for each branch: +\begin{coq_example} +Show. +\end{coq_example} + +% \subsection{\tt Program Fixpoint {\ident} {(\ident_$_0$ : \type_$_0$) +% \cdots (\ident_$_n$ : \type_$_n$)} {\tt \{wf} +% \ident$_i$ \term_{wf} {\tt \}} : type$_t$ := \term$_0$ +% \comindex{Program Fixpoint Wf} +% \label{ProgramFixpointWf}} + +% To be accepted, a well-founded {\tt Fixpoint} definition has to satisfy some +% logical constraints on the decreasing argument. +% They are needed to ensure that the {\tt Fixpoint} definition +% always terminates. The point of the {\tt \{wf \ident \term {\tt \}}} +% annotation is to let the user tell the system which argument decreases +% in which well-founded relation along the recursive calls. 
+% The term \term$_0$ will be typed in a different context than usual, +% The typing problem will in fact be reduced to: + +% % \begin{center} +% % {\tt forall} {\params} {\ident : (\ident$_0$ : type$_0$) \cdots +% % \{ \ident$_i'$ : \type$_i$ | \term_{wf} \ident$_i'$ \ident$_i$ \} +% % \cdots (\ident$_n$ : type$_n$), type$_t$} : type$_t$ := \term$_0$ +% % \end{center} + +% \begin{coq_example} +% Program Fixpoint id (n : nat) : { x : nat | x = n } := +% match n with +% | O => O +% | S p => S (id p) +% end +% \end{coq_example} + +% The {\tt match} operator matches a value (here \verb:n:) with the +% various constructors of its (inductive) type. The remaining arguments +% give the respective values to be returned, as functions of the +% parameters of the corresponding constructor. Thus here when \verb:n: +% equals \verb:O: we return \verb:0:, and when \verb:n: equals +% \verb:(S p): we return \verb:(S (id p)):. + +% The {\tt match} operator is formally described +% in detail in Section~\ref{Caseexpr}. The system recognizes that in +% the inductive call {\tt (id p)} the argument actually +% decreases because it is a {\em pattern variable} coming from {\tt match +% n with}. + +% Here again, proof obligations may be generated. In our example, we would +% have one for each branch: +% \begin{coq_example} +% Show. +% \end{coq_example} +% \begin{coq_eval} +% Abort. +% \end{coq_eval} + + + + +% \asubsection{A detailed example: Euclidean division} + +% The file {\tt Euclid} contains the proof of Euclidean division +% (theorem {\tt eucl\_dev}). The natural numbers defined in the example +% files are unary integers defined by two constructors $O$ and $S$: +% \begin{coq_example*} +% Inductive nat : Set := +% | O : nat +% | S : nat -> nat. 
+% \end{coq_example*} + +% This module contains a theorem {\tt eucl\_dev}, and its extracted term +% is of type +% \begin{verbatim} +% forall b:nat, b > 0 -> forall a:nat, diveucl a b +% \end{verbatim} +% where {\tt diveucl} is a type for the pair of the quotient and the modulo. +% We can now extract this program to \ocaml: + +% \begin{coq_eval} +% Reset Initial. +% \end{coq_eval} +% \begin{coq_example} +% Require Import Euclid. +% Extraction Inline Wf_nat.gt_wf_rec Wf_nat.lt_wf_rec. +% Recursive Extraction eucl_dev. +% \end{coq_example} + +% The inlining of {\tt gt\_wf\_rec} and {\tt lt\_wf\_rec} is not +% mandatory. It only enhances readability of extracted code. +% You can then copy-paste the output to a file {\tt euclid.ml} or let +% \Coq\ do it for you with the following command: + +% \begin{coq_example} +% Extraction "euclid" eucl_dev. +% \end{coq_example} + +% Let us play the resulting program: + +% \begin{verbatim} +% # #use "euclid.ml";; +% type sumbool = Left | Right +% type nat = O | S of nat +% type diveucl = Divex of nat * nat +% val minus : nat -> nat -> nat = +% val le_lt_dec : nat -> nat -> sumbool = +% val le_gt_dec : nat -> nat -> sumbool = +% val eucl_dev : nat -> nat -> diveucl = +% # eucl_dev (S (S O)) (S (S (S (S (S O)))));; +% - : diveucl = Divex (S (S O), S O) +% \end{verbatim} +% It is easier to test on \ocaml\ integers: +% \begin{verbatim} +% # let rec i2n = function 0 -> O | n -> S (i2n (n-1));; +% val i2n : int -> nat = +% # let rec n2i = function O -> 0 | S p -> 1+(n2i p);; +% val n2i : nat -> int = +% # let div a b = +% let Divex (q,r) = eucl_dev (i2n b) (i2n a) in (n2i q, n2i r);; +% div : int -> int -> int * int = +% # div 173 15;; +% - : int * int = 11, 8 +% \end{verbatim} + +% \asubsection{Another detailed example: Heapsort} + +% The file {\tt Heap.v} +% contains the proof of an efficient list sorting algorithm described by +% Bjerner. Is is an adaptation of the well-known {\em heapsort} +% algorithm to functional languages. 
The main function is {\tt +% treesort}, whose type is shown below: + + +% \begin{coq_eval} +% Reset Initial. +% Require Import Relation_Definitions. +% Require Import List. +% Require Import Sorting. +% Require Import Permutation. +% \end{coq_eval} +% \begin{coq_example} +% Require Import Heap. +% Check treesort. +% \end{coq_example} + +% Let's now extract this function: + +% \begin{coq_example} +% Extraction Inline sort_rec is_heap_rec. +% Extraction NoInline list_to_heap. +% Extraction "heapsort" treesort. +% \end{coq_example} + +% One more time, the {\tt Extraction Inline} and {\tt NoInline} +% directives are cosmetic. Without it, everything goes right, +% but the output is less readable. +% Here is the produced file {\tt heapsort.ml}: + +% \begin{verbatim} +% type nat = +% | O +% | S of nat + +% type 'a sig2 = +% 'a +% (* singleton inductive, whose constructor was exist2 *) + +% type sumbool = +% | Left +% | Right + +% type 'a list = +% | Nil +% | Cons of 'a * 'a list + +% type 'a multiset = +% 'a -> nat +% (* singleton inductive, whose constructor was Bag *) + +% type 'a merge_lem = +% 'a list +% (* singleton inductive, whose constructor was merge_exist *) + +% (** val merge : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> sumbool) -> +% 'a1 list -> 'a1 list -> 'a1 merge_lem **) + +% let rec merge leA_dec eqA_dec l1 l2 = +% match l1 with +% | Nil -> l2 +% | Cons (a, l) -> +% let rec f = function +% | Nil -> Cons (a, l) +% | Cons (a0, l3) -> +% (match leA_dec a a0 with +% | Left -> Cons (a, +% (merge leA_dec eqA_dec l (Cons (a0, l3)))) +% | Right -> Cons (a0, (f l3))) +% in f l2 + +% type 'a tree = +% | Tree_Leaf +% | Tree_Node of 'a * 'a tree * 'a tree + +% type 'a insert_spec = +% 'a tree +% (* singleton inductive, whose constructor was insert_exist *) + +% (** val insert : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> sumbool) -> +% 'a1 tree -> 'a1 -> 'a1 insert_spec **) + +% let rec insert leA_dec eqA_dec t a = +% match t with +% | Tree_Leaf -> Tree_Node (a, 
Tree_Leaf, Tree_Leaf) +% | Tree_Node (a0, t0, t1) -> +% let h3 = fun x -> insert leA_dec eqA_dec t0 x in +% (match leA_dec a0 a with +% | Left -> Tree_Node (a0, t1, (h3 a)) +% | Right -> Tree_Node (a, t1, (h3 a0))) + +% type 'a build_heap = +% 'a tree +% (* singleton inductive, whose constructor was heap_exist *) + +% (** val list_to_heap : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> +% sumbool) -> 'a1 list -> 'a1 build_heap **) + +% let rec list_to_heap leA_dec eqA_dec = function +% | Nil -> Tree_Leaf +% | Cons (a, l0) -> +% insert leA_dec eqA_dec (list_to_heap leA_dec eqA_dec l0) a + +% type 'a flat_spec = +% 'a list +% (* singleton inductive, whose constructor was flat_exist *) + +% (** val heap_to_list : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> +% sumbool) -> 'a1 tree -> 'a1 flat_spec **) + +% let rec heap_to_list leA_dec eqA_dec = function +% | Tree_Leaf -> Nil +% | Tree_Node (a, t0, t1) -> Cons (a, +% (merge leA_dec eqA_dec (heap_to_list leA_dec eqA_dec t0) +% (heap_to_list leA_dec eqA_dec t1))) + +% (** val treesort : ('a1 -> 'a1 -> sumbool) -> ('a1 -> 'a1 -> sumbool) +% -> 'a1 list -> 'a1 list sig2 **) + +% let treesort leA_dec eqA_dec l = +% heap_to_list leA_dec eqA_dec (list_to_heap leA_dec eqA_dec l) + +% \end{verbatim} + +% Let's test it: +% % Format.set_margin 72;; +% \begin{verbatim} +% # #use "heapsort.ml";; +% type sumbool = Left | Right +% type nat = O | S of nat +% type 'a tree = Tree_Leaf | Tree_Node of 'a * 'a tree * 'a tree +% type 'a list = Nil | Cons of 'a * 'a list +% val merge : +% ('a -> 'a -> sumbool) -> 'b -> 'a list -> 'a list -> 'a list = +% val heap_to_list : +% ('a -> 'a -> sumbool) -> 'b -> 'a tree -> 'a list = +% val insert : +% ('a -> 'a -> sumbool) -> 'b -> 'a tree -> 'a -> 'a tree = +% val list_to_heap : +% ('a -> 'a -> sumbool) -> 'b -> 'a list -> 'a tree = +% val treesort : +% ('a -> 'a -> sumbool) -> 'b -> 'a list -> 'a list = +% \end{verbatim} + +% One can remark that the argument of {\tt treesort} corresponding to +% {\tt 
eqAdec} is never used in the informative part of the terms, +% only in the logical parts. So the extracted {\tt treesort} never use +% it, hence this {\tt 'b} argument. We will use {\tt ()} for this +% argument. Only remains the {\tt leAdec} +% argument (of type {\tt 'a -> 'a -> sumbool}) to really provide. + +% \begin{verbatim} +% # let leAdec x y = if x <= y then Left else Right;; +% val leAdec : 'a -> 'a -> sumbool = +% # let rec listn = function 0 -> Nil +% | n -> Cons(Random.int 10000,listn (n-1));; +% val listn : int -> int list = +% # treesort leAdec () (listn 9);; +% - : int list = Cons (160, Cons (883, Cons (1874, Cons (3275, Cons +% (5392, Cons (7320, Cons (8512, Cons (9632, Cons (9876, Nil))))))))) +% \end{verbatim} + +% Some tests on longer lists (10000 elements) show that the program is +% quite efficient for Caml code. + + +% \asubsection{The Standard Library} + +% As a test, we propose an automatic extraction of the +% Standard Library of \Coq. In particular, we will find back the +% two previous examples, {\tt Euclid} and {\tt Heapsort}. +% Go to directory {\tt contrib/extraction/test} of the sources of \Coq, +% and run commands: +% \begin{verbatim} +% make tree; make +% \end{verbatim} +% This will extract all Standard Library files and compile them. +% It is done via many {\tt Extraction Module}, with some customization +% (see subdirectory {\tt custom}). + +% %The result of this extraction of the Standard Library can be browsed +% %at URL +% %\begin{flushleft} +% %\url{http://www.lri.fr/~letouzey/extraction}. +% %\end{flushleft} + +% %Reals theory is normally not extracted, since it is an axiomatic +% %development. We propose nonetheless a dummy realization of those +% %axioms, to test, run: \\ +% % +% %\mbox{\tt make reals}\\ + +% This test works also with Haskell. In the same directory, run: +% \begin{verbatim} +% make tree; make -f Makefile.haskell +% \end{verbatim} +% The haskell compiler currently used is {\tt hbc}. 
Any other should +% also work, just adapt the {\tt Makefile.haskell}. In particular {\tt +% ghc} is known to work. + +% \asubsection{Extraction's horror museum} + +% Some pathological examples of extraction are grouped in the file +% \begin{verbatim} +% contrib/extraction/test_extraction.v +% \end{verbatim} +% of the sources of \Coq. + +% \asubsection{Users' Contributions} + +% Several of the \Coq\ Users' Contributions use extraction to produce +% certified programs. In particular the following ones have an automatic +% extraction test (just run {\tt make} in those directories): + +% \begin{itemize} +% \item Bordeaux/Additions +% \item Bordeaux/EXCEPTIONS +% \item Bordeaux/SearchTrees +% \item Dyade/BDDS +% \item Lannion +% \item Lyon/CIRCUITS +% \item Lyon/FIRING-SQUAD +% \item Marseille/CIRCUITS +% \item Muenchen/Higman +% \item Nancy/FOUnify +% \item Rocq/ARITH/Chinese +% \item Rocq/COC +% \item Rocq/GRAPHS +% \item Rocq/HIGMAN +% \item Sophia-Antipolis/Stalmarck +% \item Suresnes/BDD +% \end{itemize} + +% Lannion, Rocq/HIGMAN and Lyon/CIRCUITS are a bit particular. They are +% the only examples of developments where {\tt Obj.magic} are needed. +% This is probably due to an heavy use of impredicativity. +% After compilation those two examples run nonetheless, +% thanks to the correction of the extraction~\cite{Let02}. + +% $Id: Program.tex 8688 2006-04-07 15:08:12Z msozeau $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-add.tex b/doc/refman/RefMan-add.tex new file mode 100644 index 00000000..a4fdf0cd --- /dev/null +++ b/doc/refman/RefMan-add.tex @@ -0,0 +1,54 @@ +\chapter{List of additional documentation}\label{Addoc} + +\section{Tutorials}\label{Tutorial} +A companion volume to this reference manual, the \Coq\ Tutorial, is +aimed at gently introducing new users to developing proofs in \Coq\ +without assuming prior knowledge of type theory. 
In a second step, the +user can also read the tutorial on recursive types (document {\tt +RecTutorial.ps}). + +\section{The \Coq\ standard library}\label{Addoc-library} +A brief description of the \Coq\ standard library is given in the additional +document {\tt Library.dvi}. + +\section{Installation and un-installation procedures}\label{Addoc-install} +An \verb!INSTALL! file in the distribution explains how to install +\Coq. + +\section{{\tt Extraction} of programs}\label{Addoc-extract} +{\tt Extraction} is a package offering some special facilities to +extract ML program files. It is described in the separate document +{\tt Extraction.dvi}. +\index{Extraction of programs} + +\section{Proof printing in {\tt Natural} language}\label{Addoc-natural} +{\tt Natural} is a tool to print proofs in natural language. +It is described in the separate document {\tt Natural.dvi}. +\index{Natural@{\tt Print Natural}} +\index{Printing in natural language} + +\section{The {\tt Omega} decision tactic}\label{Addoc-omega} +{\bf Omega} is a tactic to automatically solve arithmetical goals in +Presburger arithmetic (i.e. arithmetic without multiplication). +It is described in the separate document {\tt Omega.dvi}. +\index{Omega@{\tt Omega}} + +\section{Simplification on rings}\label{Addoc-polynom} +A documentation of the package {\tt polynom} (simplification on rings) +can be found in the document {\tt Polynom.dvi}. +\index{Polynom@{\tt Polynom}} +\index{Simplification on rings} + +%\section{Anomalies}\label{Addoc-anomalies} +%The separate document {\tt Anomalies.*} gives a list of known +%anomalies and bugs of the system. Before communicating us an +%anomalous behavior, please check first whether it has been already +%reported in this document. 
+ +% $Id: RefMan-add.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-cas.tex b/doc/refman/RefMan-cas.tex new file mode 100644 index 00000000..c79c14e9 --- /dev/null +++ b/doc/refman/RefMan-cas.tex @@ -0,0 +1,692 @@ +%\documentstyle[11pt,../tools/coq-tex/coq,fullpage]{article} + +%\pagestyle{plain} + +%\begin{document} +%\nocite{Augustsson85,wadler87,HuetLevy79,MaSi94,maranget94,Laville91,saidi94,dowek93,Leroy90,puel-suarez90} + +%\input{title} +%\input{macros} +%\coverpage{The Macro \verb+Cases+}{Cristina Cornes} +%\pagestyle{plain} +\chapter{The Macro {\tt Cases}}\label{Cases}\index{Cases@{\tt Cases}} + +\marginparwidth 0pt \oddsidemargin 0pt \evensidemargin 0pt \marginparsep 0pt +\topmargin 0pt \textwidth 6.5in \textheight 8.5in + + +\verb+Cases+ is an extension to the concrete syntax of Coq that allows +to write case expressions using patterns in a syntax close to that of ML languages. +This construction is just a macro that is +expanded during parsing into a sequence of the primitive construction + \verb+Case+. +The current implementation contains two strategies, one for compiling +non-dependent case and another one for dependent case. +\section{Patterns}\label{implementation} +A pattern is a term that indicates the {\em shape} of a value, i.e. a +term where the variables can be seen as holes. When a value is +matched against a pattern (this is called {\em pattern matching}) +the pattern behaves as a filter, and associates a sub-term of the value +to each hole (i.e. to each variable pattern). + + +The syntax of patterns is presented in figure \ref{grammar}\footnote{ +Notation: \{$P$\}$^*$ denotes zero or more repetitions of $P$ and + \{$P$\}$^+$ denotes one or more repetitions of $P$. {\sl command} is the +non-terminal corresponding to terms in Coq.}. 
+Patterns are built up from constructors and variables. Any identifier +that is not a constructor of an inductive or coinductive type is +considered to be +a variable. Identifiers in patterns should be linear except for +the ``don't care'' pattern denoted by ``\verb+_+''. +We can use patterns to build more complex patterns. +We call {\em simple pattern} a variable or a pattern of the form +$(c~\vec{x})$ where $c$ is a constructor symbol and $\vec{x}$ is a +linear vector of variables. If a pattern is +not simple we call it {\em nested}. + + +A variable pattern matches any value, and the +identifier is bound to that value. The pattern ``\verb+_+'' also matches +any value, but it is not binding. Alias patterns written \verb+(+{\sl pattern} \verb+as+ {\sl +identifier}\verb+)+ are also accepted. This pattern matches the same values as +{\sl pattern} does and +{\sl identifier} is bound to the matched value. +A list of patterns is also considered as a pattern and is called {\em +multiple pattern}. + +\begin{figure}[t] +\begin{center} +\begin{sl} +\begin{tabular}{|l|}\hline \\ +\begin{tabular}{rcl}%\hline && \\ +simple\_pattern & := & pattern \verb+as+ identifier \\ + &$|$ & pattern \verb+,+ pattern \\ + &$|$ & pattern pattern\_list \\ && \\ + +pattern & := & identifier $|$ \verb+(+ simple\_pattern \verb+)+ \\ &&\\ + + +equation & := & \{pattern\}$^+$ ~\verb+=>+ ~term \\ && \\ + +ne\_eqn\_list & := & \verb+|+$^{opt}$ equation~ \{\verb+|+ equation\}$^*$ \\ &&\\ + +eqn\_list & := & \{~equation~ \{\verb+|+ equation\}$^*$~\}$^*$\\ &&\\ + + +term & := & +\verb+Cases+ \{term \}$^+$ \verb+of+ ne\_eqn\_list \verb+end+ \\ +&$|$ & \verb+<+term\verb+>+ \verb+Cases+ \{ term \}$^+$ +\verb+of+ eqn\_list \verb+end+ \\&& %\\ \hline +\end{tabular} \\ \hline +\end{tabular} +\end{sl} \end{center} +\caption{Macro Cases syntax.} +\label{grammar} +\end{figure} + + +Pattern matching improves readability. 
Compare for example the term +of the function {\em is\_zero} of natural +numbers written with patterns and the one written in primitive +concrete syntax (note that the first bar \verb+|+ is optional)~: + +\begin{center} +\begin{small} +\begin{tabular}{l} +\verb+[n:nat] Cases n of | O => true | _ => false end+,\\ +\verb+[n:nat] Case n of true [_:nat]false end+. +\end{tabular} +\end{small} +\end{center} + +In Coq pattern matching is compiled into the primitive constructions, +thus the expressiveness of the theory remains the same. Once the stage +of parsing has finished patterns disappear. An easy way to see the +result of the expansion is by printing the term with \texttt{Print} if +the term is a constant, or +using the command \texttt{Check} that displays +the term with its type : + +\begin{coq_example} +Check (fun n:nat => match n with + | O => true + | _ => false + end). +\end{coq_example} + + +\verb+Cases+ accepts optionally an infix term enclosed between +brackets \verb+<>+ that we +call the {\em elimination predicate}. +This term is the same argument as the one expected by the primitive +\verb+Case+. Given a pattern matching +expression, if all the right hand sides of \verb+=>+ ({\em rhs} in +short) have the same type, then this term +can be sometimes synthesized, and so we can omit the \verb+<>+. +Otherwise we have to +provide the predicate between \verb+<>+ as for the primitive \verb+Case+. + +Let us illustrate through examples the different aspects of pattern matching. +Consider for example the function that computes the maximum of two +natural numbers. We can write it in primitive syntax by: +\begin{coq_example} +Fixpoint max (n m:nat) {struct m} : nat := + match n with + | O => + (* O *) m + (* S n' *) + | S n' => + match m with + | O => + (* O *) S n' + (* S m' *) + | S m' => S (max n' m') + end + end. +\end{coq_example} + +Using patterns in the definitions gives: + +\begin{coq_example} +Reset max. 
+Fixpoint max (n m:nat) {struct m} : nat := + match n with + | O => m + | S n' => match m with + | O => S n' + | S m' => S (max n' m') + end + end. +\end{coq_example} + +Another way to write this definition is to use a multiple pattern to + match \verb+n+ and \verb+m+: + +\begin{coq_example} +Reset max. +Fixpoint max (n m:nat) {struct m} : nat := + match n, m with + | O, _ => m + | S n', O => S n' + | S n', S m' => S (max n' m') + end. +\end{coq_example} + + +The strategy examines patterns +from left to right. A case expression is generated {\bf only} when there is at least one constructor in the column of patterns. +For example, +\begin{coq_example} +Check (fun x:nat => match x return nat with + | y => y + end). +\end{coq_example} + + + +We can also use ``\verb+as+ patterns'' to associate a name to a +sub-pattern: + +\begin{coq_example} +Reset max. +Fixpoint max (n m:nat) {struct n} : nat := + match n, m with + | O, _ => m + | S n' as N, O => N + | S n', S m' => S (max n' m') + end. +\end{coq_example} + + +In the previous examples patterns do not conflict with, but +sometimes it is comfortable to write patterns that admits a non +trivial superposition. Consider +the boolean function $lef$ that given two natural numbers +yields \verb+true+ if the first one is less or equal than the second +one and \verb+false+ otherwise. We can write it as follows: + +\begin{coq_example} +Fixpoint lef (n m:nat) {struct m} : bool := + match n, m with + | O, x => true + | x, O => false + | S n, S m => lef n m + end. +\end{coq_example} + +Note that the first and the second multiple pattern superpose because the couple of +values \verb+O O+ matches both. Thus, what is the result of the +function on those values? +To eliminate ambiguity we use the {\em textual priority rule}: we +consider patterns ordered from top to bottom, then a value is matched +by the pattern at the $ith$ row if and only if is not matched by some +pattern of a previous row. 
Thus in the example, +\verb+O O+ is matched by the first pattern, and so \verb+(lef O O)+ +yields \verb+true+. + +Another way to write this function is: + +\begin{coq_example} +Reset lef. +Fixpoint lef (n m:nat) {struct m} : bool := + match n, m with + | O, x => true + | S n, S m => lef n m + | _, _ => false + end. +\end{coq_example} + + +Here the last pattern superposes with the first two. Because +of the priority rule, the last pattern +will be used only for values that match neither the first nor +the second one. + +Terms with useless patterns are accepted by the +system. For example, +\begin{coq_example} +Check + (fun x:nat => match x with + | O => true + | S _ => false + | x => true + end). +\end{coq_example} + +is accepted even though the last pattern is never used. +Beware, the +current implementation raises no warning message when there are unused +patterns in a term. + + + + +\subsection{About patterns of parametric types} +When matching objects of a parametric type, constructors in patterns +{\em do not expect} the parameter arguments. Their value is deduced +during expansion. + +Consider for example the polymorphic lists: + +\begin{coq_example} +Inductive List (A:Set) : Set := + | nil : List A + | cons : A -> List A -> List A. +\end{coq_example} + +We can check the function {\em tail}: + +\begin{coq_example} +Check + (fun l:List nat => + match l with + | nil => nil nat + | cons _ l' => l' + end). +\end{coq_example} + + +When we use parameters in patterns there is an error message: +\begin{coq_example} +Check + (fun l:List nat => + match l with + | nil nat => nil nat + | cons nat _ l' => l' + end). +\end{coq_example} + + + +\subsection{Matching objects of dependent types} +The previous examples illustrate pattern matching on objects of +non-dependent types, but we can also +use the macro to destructure objects of dependent type. 
+Consider the type \verb+listn+ of lists of a certain length: + +\begin{coq_example} +Inductive listn : nat -> Set := + | niln : listn 0%N + | consn : forall n:nat, nat -> listn n -> listn (S n). +\end{coq_example} + +\subsubsection{Understanding dependencies in patterns} +We can define the function \verb+length+ over \verb+listn+ by : + +\begin{coq_example} +Definition length (n:nat) (l:listn n) := n. +\end{coq_example} + +Just for illustrating pattern matching, +we can define it by case analysis: +\begin{coq_example} +Reset length. +Definition length (n:nat) (l:listn n) := + match l with + | niln => 0%N + | consn n _ _ => S n + end. +\end{coq_example} + +We can understand the meaning of this definition using the +same notions of usual pattern matching. + +Now suppose we split the second pattern of \verb+length+ into two +cases so to give an +alternative definition using nested patterns: +\begin{coq_example} +Definition length1 (n:nat) (l:listn n) := + match l with + | niln => 0%N + | consn n _ niln => S n + | consn n _ (consn _ _ _) => S n + end. +\end{coq_example} + +It is obvious that \verb+length1+ is another version of +\verb+length+. We can also give the following definition: +\begin{coq_example} +Definition length2 (n:nat) (l:listn n) := + match l with + | niln => 0%N + | consn n _ niln => 1%N + | consn n _ (consn m _ _) => S (S m) + end. +\end{coq_example} + +If we forget that \verb+listn+ is a dependent type and we read these +definitions using the usual semantics of pattern matching, we can conclude +that \verb+length1+ +and \verb+length2+ are different functions. +In fact, they are equivalent +because the pattern \verb+niln+ implies that \verb+n+ can only match +the value $0$ and analogously the pattern \verb+consn+ determines that \verb+n+ can +only match values of the form $(S~v)$ where $v$ is the value matched by +\verb+m+. + + +The converse is also true. 
If +we destructure the length value with the pattern \verb+O+ then the list +value should be $niln$. +Thus, the following term \verb+length3+ corresponds to the function +\verb+length+ but this time defined by case analysis on the dependencies instead of on the list: + +\begin{coq_example} +Definition length3 (n:nat) (l:listn n) := + match l with + | niln => 0%N + | consn O _ _ => 1%N + | consn (S n) _ _ => S (S n) + end. +\end{coq_example} + +When we have nested patterns of dependent types, the semantics of +pattern matching becomes a little more difficult because +the set of values that are matched by a sub-pattern may be conditioned by the +values matched by another sub-pattern. Dependent nested patterns are +somehow constrained patterns. +In the examples, the expansion of +\verb+length1+ and \verb+length2+ yields exactly the same term + but the +expansion of \verb+length3+ is completely different. \verb+length1+ and +\verb+length2+ are expanded into two nested case analysis on +\verb+listn+ while \verb+length3+ is expanded into a case analysis on +\verb+listn+ containing a case analysis on natural numbers inside. + + +In practice the user can think about the patterns as independent and +it is the expansion algorithm that cares to relate them. \\ + + +\subsubsection{When the elimination predicate must be provided} +The examples given so far do not need an explicit elimination predicate +between \verb+<>+ because all the rhs have the same type and the +strategy succeeds to synthesize it. +Unfortunately when dealing with dependent patterns it often happens +that we need to write cases where the type of the rhs are +different instances of the elimination predicate. 
+The function \verb+concat+ for \verb+listn+ +is an example where the branches have different type +and we need to provide the elimination predicate: + +\begin{coq_example} +Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : + listn (n + m) := + match l in listn n return listn (n + m) with + | niln => l' + | consn n' a y => consn (n' + m) a (concat n' y m l') + end. +\end{coq_example} + +Recall that a list of patterns is also a pattern. So, when +we destructure several terms at the same time and the branches have +different type we need to provide +the elimination predicate for this multiple pattern. + +For example, an equivalent definition for \verb+concat+ (even though with a useless extra pattern) would have +been: + +\begin{coq_example} +Reset concat. +Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : + listn (n + m) := + match l in listn n, l' return listn (n + m) with + | niln, x => x + | consn n' a y, x => consn (n' + m) a (concat n' y m x) + end. +\end{coq_example} + +Note that this time, the predicate \verb+[n,_:nat](listn (plus n m))+ is binary because we +destructure both \verb+l+ and \verb+l'+ whose types have arity one. +In general, if we destructure the terms $e_1\ldots e_n$ +the predicate will be of arity $m$ where $m$ is the sum of the +number of dependencies of the type of $e_1, e_2,\ldots e_n$ (the $\lambda$-abstractions +should correspond from left to right to each dependent argument of the +type of $e_1\ldots e_n$). +When the arity of the predicate (i.e. number of abstractions) is not +correct Coq rises an error message. For example: + +\begin{coq_example} +Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : + listn (n + m) := + match l, l' with + | niln, x => x + | consn n' a y, x => consn (n' + m) a (concat n' y m x) + end. 
+\end{coq_example} + + +\subsection{Using pattern matching to write proofs} +In all the previous examples the elimination predicate does not depend on the object(s) matched. +The typical case where this is not possible is when we write a proof by +induction or a function that yields an object of dependent type. + +For example, we can write +the function \verb+buildlist+ that given a natural number +$n$ builds a list length $n$ containing zeros as follows: + +\begin{coq_example} +Fixpoint buildlist (n:nat) : listn n := + match n return listn n with + | O => niln + | S n => consn n 0 (buildlist n) + end. +\end{coq_example} + +We can also use multiple patterns whenever the elimination predicate has +the correct arity. + +Consider the following definition of the predicate less-equal +\verb+Le+: + +\begin{coq_example} +Inductive Le : nat -> nat -> Prop := + | LeO : forall n:nat, Le 0%N n + | LeS : forall n m:nat, Le n m -> Le (S n) (S m). +\end{coq_example} + +We can use multiple patterns to write the proof of the lemma + \verb+(n,m:nat) (Le n m)\/(Le m n)+: + +\begin{coq_example} +Fixpoint dec (n m:nat) {struct n} : Le n m \/ Le m n := + match n, m return Le n m \/ Le m n with + | O, x => or_introl (Le x 0) (LeO x) + | x, O => or_intror (Le x 0) (LeO x) + | S n as N, S m as M => + match dec n m with + | or_introl h => or_introl (Le M N) (LeS n m h) + | or_intror h => or_intror (Le N M) (LeS m n h) + end + end. +\end{coq_example} +In the example of \verb+dec+ the elimination predicate is binary +because we destructure two arguments of \verb+nat+ that is a +non-dependent type. Note the first \verb+Cases+ is dependent while the +second is not. + +In general, consider the terms $e_1\ldots e_n$, +where the type of $e_i$ is an instance of a family type +$[\vec{d_i}:\vec{D_i}]T_i$ ($1\leq i +\leq n$). 
Then to write \verb+<+${\cal P}$\verb+>Cases+ $e_1\ldots +e_n$ \verb+of+ \ldots \verb+end+, the +elimination predicate ${\cal P}$ should be of the form: +$[\vec{d_1}:\vec{D_1}][x_1:T_1]\ldots [\vec{d_n}:\vec{D_n}][x_n:T_n]Q.$ + + + + +\section{Extending the syntax of pattern} +The primitive syntax for patterns considers only those patterns containing +symbols of constructors and variables. Nevertheless, we +may define our own syntax for constructors and may be interested in +using this syntax to write patterns. +Because not any term is a pattern, the fact of extending the terms +syntax does not imply the extension of pattern syntax. Thus, +the grammar of patterns should be explicitly extended whenever we +want to use a particular syntax for a constructor. +The grammar rules for the macro \verb+Cases+ (and thus for patterns) +are defined in the file \verb+Multcase.v+ in the directory +\verb+src/syntax+. To extend the grammar of patterns +we need to extend the non-terminals corresponding to patterns +(we refer the reader to chapter of grammar extensions). + + +We have already extended the pattern syntax so as to note +the constructor \verb+pair+ of cartesian product with "( , )" in patterns. +This allows for example, to write the first projection +of pairs as follows: +\begin{coq_example} +Definition fst (A B:Set) (H:A * B) := match H with + | pair x y => x + end. +\end{coq_example} +The grammar presented in figure \ref{grammar} actually +contains this extension. + +\section{When does the expansion strategy fail?}\label{limitations} +The strategy works very like in ML languages when treating +patterns of non-dependent type. +But there are new cases of failure that are due to the presence of +dependencies. + +The error messages of the current implementation may be +sometimes confusing. +When the tactic fails because patterns are somehow incorrect then +error messages refer to the initial expression. 
But the strategy +may succeed to build an expression whose sub-expressions are well typed but +the whole expression is not. In this situation the message makes +reference to the expanded expression. +We encourage users, when they have patterns with the same outer constructor in different equations, to name the variable patterns in the same positions with the same name. +E.g. to write {\small\verb+(cons n O x) => e1+} and {\small\verb+(cons n \_ x) => e2+} instead of +{\small\verb+(cons n O x) => e1+} and {\small\verb+(cons n' \_ x') => e2+}. This helps to maintain certain name correspondence between the generated expression and the original. + + +Here is a summary of the error messages corresponding to each situation: +\begin{itemize} +\item patterns are incorrect (because constructors are not +applied to the correct number of the arguments, because they are not linear or they are +wrongly typed) +\begin{itemize} +\item \sverb{In pattern } {\sl term} \sverb{the constructor } {\sl ident} +\sverb{expects } {\sl num} \sverb{arguments} + +\item \sverb{The variable } {\sl ident} \sverb{is bound several times in pattern } {\sl term} + +\item \sverb{Constructor pattern: } {\sl term} \sverb{cannot match values of type } {\sl term} +\end{itemize} + +\item the pattern matching is not exhaustive +\begin{itemize} +\item \sverb{This pattern-matching is not exhaustive} +\end{itemize} +\item the elimination predicate provided to \verb+Cases+ has not the expected arity + +\begin{itemize} +\item \sverb{The elimination predicate } {\sl term} \sverb{should be +of arity } {\sl num} \sverb{(for non dependent case) or } {\sl num} \sverb{(for dependent case)} +\end{itemize} + + \item the whole expression is wrongly typed, or the synthesis of implicit arguments fails (for example to find +the elimination predicate or to resolve implicit arguments in the rhs). 
+ + +There are {\em nested patterns of dependent type}, the +elimination predicate corresponds to non-dependent case and has the form $[x_1:T_1]...[x_n:T_n]T$ +and {\bf some} $x_i$ occurs {\bf free} in +$T$. Then, the strategy may fail to find out a correct elimination +predicate during some step of compilation. +In this situation we recommend the user to rewrite the nested +dependent patterns into several \verb+Cases+ with {\em simple patterns}. + +In all these cases we have the following error message: + + \begin{itemize} + \item + {\tt Expansion strategy failed to build a well typed case expression. + There is a branch that mismatches the expected type. + The risen type error on the result of expansion was:} + \end{itemize} + +\item because of nested patterns, it may happen that even though all +the rhs have the same type, the strategy needs +dependent elimination and so an elimination predicate must be +provided. The system +warns about this situation, trying to compile anyway with the +non-dependent strategy. The risen message is: +\begin{itemize} +\item {\tt Warning: This pattern matching may need dependent elimination to be compiled. +I will try, but if fails try again giving dependent elimination predicate.} +\end{itemize} + +\item there are {\em nested patterns of dependent type} and the strategy +builds a term that is well typed but recursive +calls in fix point are reported as illegal: +\begin{itemize} +\item {\tt Error: Recursive call applied to an illegal term ...} +\end{itemize} + +This is because the strategy generates a term that is correct +w.r.t. to the initial term but which does not pass the guard condition. +In this situation we recommend the user to transform the nested dependent +patterns into {\em several \verb+Cases+ of simple patterns}. +Let us explain this with an example. 
+Consider the following definition of a function that yields the last
+element of a list and \verb+O+ if it is empty:
+
+\begin{coq_example}
+Fixpoint last (n:nat) (l:listn n) {struct l} : nat :=
+ match l with
+ | consn _ a niln => a
+ | consn m _ x => last m x
+ | niln => 0%N
+ end.
+\end{coq_example}
+
+It fails because of the priority between patterns, we know that this
+definition is equivalent to the following more explicit one (which
+fails too):
+
+\begin{coq_example*}
+Fixpoint last (n:nat) (l:listn n) {struct l} : nat :=
+ match l with
+ | consn _ a niln => a
+ | consn n _ (consn m b x) => last n (consn m b x)
+ | niln => 0%N
+ end.
+\end{coq_example*}
+
+Note that the recursive call \sverb{(last n (consn m b x)) } is not
+guarded. When treating with patterns of dependent types the strategy
+interprets the first definition of \texttt{last} as the second
+one\footnote{In languages of the ML family
+the first definition would be translated into a term where the
+variable \texttt{x} is shared in the expression. When
+patterns are of non-dependent types, Coq compiles as in ML languages
+using sharing. When patterns are of dependent types the compilation
+reconstructs the term as in the second definition of \texttt{last} so to
+ensure the result of expansion is well typed.}.
+Thus it generates a
+term where the recursive call is rejected by the
+guard condition.
+
+You can get rid of this problem by writing the definition with \emph{simple
+patterns}:
+
+\begin{coq_example}
+Fixpoint last (n:nat) (l:listn n) {struct l} : nat :=
+ match l return nat with
+ | consn m a x => match x with
+ | niln => a
+ | _ => last m x
+ end
+ | niln => 0%N
+ end.
+\end{coq_example}
+
+
+\end{itemize}
+
+%\end{document}
+
diff --git a/doc/refman/RefMan-cic.tex b/doc/refman/RefMan-cic.tex
new file mode 100644
index 00000000..18b6ed9c
--- /dev/null
+++ b/doc/refman/RefMan-cic.tex
@@ -0,0 +1,1480 @@
+\chapter{Calculus of Inductive Constructions}
+\label{Cic}
+\index{Cic@\textsc{CIC}}
+\index{pCic@p\textsc{CIC}}
+\index{Calculus of (Co)Inductive Constructions}
+
+The underlying formal language of {\Coq} is a {\em Calculus of
+ Constructions} with {\em Inductive Definitions}. It is presented in
+this chapter.
+For {\Coq} version V7, this Calculus was known as the
+{\em Calculus of (Co)Inductive Constructions}\index{Calculus of
+ (Co)Inductive Constructions} (\iCIC\ in short).
+The underlying calculus of {\Coq} version V8.0 and up is a weaker
+ calculus where the sort \Set{} satisfies predicative rules.
+We call this calculus the
+{\em Predicative Calculus of (Co)Inductive
+ Constructions}\index{Predicative Calculus of
+ (Co)Inductive Constructions} (\pCIC\ in short).
+In section~\ref{impredicativity} we give the extra-rules for \iCIC. A
+ compiling option of \Coq{} allows to type-check theories in this
+ extended system.
+
+In \CIC\, all objects have a {\em type}. There are types for functions (or
+programs), there are atomic types (especially datatypes)... but also
+types for proofs and types for the types themselves.
+Especially, any object handled in the formalism must belong to a
+type. For instance, the statement {\it ``for all x, P''} is not
+allowed in type theory; you must say instead: {\it ``for all x
+belonging to T, P''}. The expression {\it ``x belonging to T''} is
+written {\it ``x:T''}. One also says: {\it ``x has type T''}.
+The terms of {\CIC} are detailed in section \ref{Terms}.
+
+In \CIC\, there is an internal reduction mechanism. In particular, it
+allows to decide if two programs are {\em intensionally} equal (one
+says {\em convertible}). Convertibility is presented in section
+\ref{convertibility}.
+ +The remaining sections are concerned with the type-checking of terms. +The beginner can skip them. + +The reader seeking a background on the Calculus of Inductive +Constructions may read several papers. Giménez~\cite{Gim98} provides +an introduction to inductive and coinductive definitions in Coq. In +their book~\cite{CoqArt}, Bertot and Castéran give a precise +description of the \CIC{} based on numerous practical examples. +Barras~\cite{Bar99}, Werner~\cite{Wer94} and +Paulin-Mohring~\cite{Moh97} are the most recent theses dealing with +Inductive Definitions. Coquand-Huet~\cite{CoHu85a,CoHu85b,CoHu86} +introduces the Calculus of Constructions. Coquand-Paulin~\cite{CoPa89} +extended this calculus to inductive definitions. The {\CIC} is a +formulation of type theory including the possibility of inductive +constructions, Barendregt~\cite{Bar91} studies the modern form of type +theory. + +\section{The terms}\label{Terms} + +In most type theories, one usually makes a syntactic distinction +between types and terms. This is not the case for \CIC\ which defines +both types and terms in the same syntactical structure. This is +because the type-theory itself forces terms and types to be defined in +a mutual recursive way and also because similar constructions can be +applied to both terms and types and consequently can share the same +syntactic structure. + +Consider for instance the $\ra$ constructor and assume \nat\ is the +type of natural numbers. Then $\ra$ is used both to denote +$\nat\ra\nat$ which is the type of functions from \nat\ to \nat, and +to denote $\nat \ra \Prop$ which is the type of unary predicates over +the natural numbers. Consider abstraction which builds functions. It +serves to build ``ordinary'' functions as $\kw{fun}~x:\nat \Ra ({\tt mult} ~x~x)$ (assuming {\tt mult} is already defined) but may build also +predicates over the natural numbers. 
For instance $\kw{fun}~x:\nat \Ra +(x=x)$ will +represent a predicate $P$, informally written in mathematics +$P(x)\equiv x=x$. If $P$ has type $\nat \ra \Prop$, $(P~x)$ is a +proposition, furthermore $\kw{forall}~x:\nat,(P~x)$ will represent the type of +functions which associate to each natural number $n$ an object of +type $(P~n)$ and consequently represent proofs of the formula +``$\forall x.P(x)$''. + +\subsection{Sorts}\label{Sorts} +\index{Sorts} +Types are seen as terms of the language and then should belong to +another type. The type of a type is always a constant of the language +called a {\em sort}. + +The two basic sorts in the language of \CIC\ are \Set\ and \Prop. + +The sort \Prop\ intends to be the type of logical propositions. If +$M$ is a logical proposition then it denotes a class, namely the class +of terms representing proofs of $M$. An object $m$ belonging to $M$ +witnesses the fact that $M$ is true. An object of type \Prop\ is +called a {\em proposition}. + +The sort \Set\ intends to be the type of specifications. This includes +programs and the usual sets such as booleans, naturals, lists +etc. + +These sorts themselves can be manipulated as ordinary terms. +Consequently sorts also should be given a type. Because assuming +simply that \Set\ has type \Set\ leads to an inconsistent theory, we +have infinitely many sorts in the language of \CIC. These are, in +addition to \Set\ and \Prop\, a hierarchy of universes \Type$(i)$ +for any integer $i$. We call \Sort\ the set of sorts +which is defined by: +\[\Sort \equiv \{\Prop,\Set,\Type(i)| i \in \NN\} \] +\index{Type@{\Type}} +\index{Prop@{\Prop}} +\index{Set@{\Set}} +The sorts enjoy the following properties: {\Prop:\Type(0)}, {\Set:\Type(0)} and + {\Type$(i)$:\Type$(i+1)$}. + +The user will never mention explicitly the index $i$ when referring to +the universe \Type$(i)$. One only writes \Type. 
The +system itself generates for each instance of \Type\ a new +index for the universe and checks that the constraints between these +indexes can be solved. From the user point of view we consequently +have {\sf Type :Type}. + +We shall make precise in the typing rules the constraints between the +indexes. + +\subsection{Constants} +Besides the sorts, the language also contains constants denoting +objects in the environment. These constants may denote previously +defined objects but also objects related to inductive definitions +(either the type itself or one of its constructors or destructors). + +\medskip\noindent {\bf Remark. } In other presentations of \CIC, +the inductive objects are not seen as +external declarations but as first-class terms. Usually the +definitions are also completely ignored. This is a nice theoretical +point of view but not so practical. An inductive definition is +specified by a possibly huge set of declarations, clearly we want to +share this specification among the various inductive objects and not +to duplicate it. So the specification should exist somewhere and the +various objects should refer to it. We choose one more level of +indirection where the objects are just represented as constants and +the environment gives the information on the kind of object the +constant refers to. + +\medskip +Our inductive objects will be manipulated as constants declared in the +environment. This roughly corresponds to the way they are actually +implemented in the \Coq\ system. It is simple to map this presentation +in a theory where inductive objects are represented by terms. + +\subsection{Terms} + +Terms are built from variables, global names, constructors, +abstraction, application, local declarations bindings (``let-in'' +expressions) and product. + +From a syntactic point of view, types cannot be distinguished from terms, +except that they cannot start by an abstraction, and that if a term is +a sort or a product, it should be a type. 
+
+More precisely the language of the {\em Calculus of Inductive
+ Constructions} is built from the following rules:
+
+\begin{enumerate}
+\item the sorts {\sf Set, Prop, Type} are terms.
+\item names for global constants of the environment are terms.
+\item variables are terms.
+\item if $x$ is a variable and $T$, $U$ are terms then $\forall~x:T,U$
+ ($\kw{forall}~x:T,U$ in \Coq{} concrete syntax) is a term. If $x$
+ occurs in $U$, $\forall~x:T,U$ reads as {\it ``for all x of type T,
+ U''}. As $U$ depends on $x$, one says that $\forall~x:T,U$ is a
+ {\em dependent product}. If $x$ doesn't occur in $U$ then
+ $\forall~x:T,U$ reads as {\it ``if T then U''}. A non dependent
+ product can be written: $T \rightarrow U$.
+\item if $x$ is a variable and $T$, $U$ are terms then $\lb~x:T \mto U$
+ ($\kw{fun}~x:T\Ra U$ in \Coq{} concrete syntax) is a term. This is a
+ notation for the $\lambda$-abstraction of
+ $\lambda$-calculus\index{lambda-calculus@$\lambda$-calculus}
+ \cite{Bar81}. The term $\lb~x:T \mto U$ is a function which maps
+ elements of $T$ to $U$.
+\item if $T$ and $U$ are terms then $(T\ U)$ is a term
+ ($T~U$ in \Coq{} concrete syntax). The term $(T\
+ U)$ reads as {\it ``T applied to U''}.
+\item if $x$ is a variable, and $T$, $U$ are terms then
+ $\kw{let}~x:=T~\kw{in}~U$ is a
+ term which denotes the term $U$ where the variable $x$ is locally
+ bound to $T$. This stands for the common ``let-in'' construction of
+ functional programs such as ML or Scheme.
+\end{enumerate}
+
+\paragraph{Notations.} Application associates to the left such that
+$(t~t_1\ldots t_n)$ represents $(\ldots (t~t_1)\ldots t_n)$. The
+products and arrows associate to the right such that $\forall~x:A,B\ra C\ra
+D$ represents $\forall~x:A,(B\ra (C\ra D))$. One uses sometimes
+$\forall~x~y:A,B$ or
+$\lb~x~y:A\mto B$ to denote the abstraction or product of several variables
+of the same type.
The equivalent formulation is $\forall~x:A, \forall y:A,B$ or +$\lb~x:A \mto \lb y:A \mto B$ + +\paragraph{Free variables.} +The notion of free variables is defined as usual. In the expressions +$\lb~x:T\mto U$ and $\forall x:T, U$ the occurrences of $x$ in $U$ +are bound. They are represented by de Bruijn indexes in the internal +structure of terms. + +\paragraph{Substitution.} \index{Substitution} +The notion of substituting a term $t$ to free occurrences of a +variable $x$ in a term $u$ is defined as usual. The resulting term +is written $\subst{u}{x}{t}$. + + +\section{Typed terms}\label{Typed-terms} + +As objects of type theory, terms are subjected to {\em type +discipline}. The well typing of a term depends on an environment which +consists in a global environment (see below) and a local context. + +\paragraph{Local context.} +A {\em local context} (or shortly context) is an ordered list of +declarations of variables. The declaration of some variable $x$ is +either an assumption, written $x:T$ ($T$ is a type) or a definition, +written $x:=t:T$. We use brackets to write contexts. A +typical example is $[x:T;y:=u:U;z:V]$. Notice that the variables +declared in a context must be distinct. If $\Gamma$ declares some $x$, +we write $x \in\Gamma$. By writing $(x:T)\in\Gamma$ we mean that +either $x:T$ is an assumption in $\Gamma$ or that there exists some $t$ such +that $x:=t:T$ is a definition in $\Gamma$. If $\Gamma$ defines some +$x:=t:T$, we also write $(x:=t:T)\in\Gamma$. Contexts must be +themselves {\em well formed}. For the rest of the chapter, the +notation $\Gamma::(y:T)$ (resp $\Gamma::(y:=t:T)$) denotes the context +$\Gamma$ enriched with the declaration $y:T$ (resp $y:=t:T$). The +notation $[]$ denotes the empty context. \index{Context} + +% Does not seem to be used further... 
+% Si dans l'explication WF(E)[Gamma] concernant les constantes +% definies ds un contexte + +We define the inclusion of two contexts $\Gamma$ and $\Delta$ (written +as $\Gamma \subset \Delta$) as the property, for all variable $x$, +type $T$ and term $t$, if $(x:T) \in \Gamma$ then $(x:T)\in \Delta$ +and if $(x:=t:T) \in \Gamma$ then $(x:=t:T)\in \Delta$. +%We write +% $|\Delta|$ for the length of the context $\Delta$, that is for the number +% of declarations (assumptions or definitions) in $\Delta$. + +A variable $x$ is said to be free in $\Gamma$ if $\Gamma$ contains a +declaration $y:T$ such that $x$ is free in $T$. + +\paragraph{Environment.}\index{Environment} +Because we are manipulating global declarations (constants and global +assumptions), we also need to consider a global environment $E$. + +An environment is an ordered list of declarations of global +names. Declarations are either assumptions or ``standard'' +definitions, that is abbreviations for well-formed terms +but also definitions of inductive objects. In the latter +case, an object in the environment will define one or more constants +(that is types and constructors, see section \ref{Cic-inductive-definitions}). + +An assumption will be represented in the environment as +\Assum{\Gamma}{c}{T} which means that $c$ is assumed of some type $T$ +well-defined in some context $\Gamma$. An (ordinary) definition will +be represented in the environment as \Def{\Gamma}{c}{t}{T} which means +that $c$ is a constant which is valid in some context $\Gamma$ whose +value is $t$ and type is $T$. + +The rules for inductive definitions (see section +\ref{Cic-inductive-definitions}) have to be considered as assumption +rules to which the following definitions apply: if the name $c$ is +declared in $E$, we write $c \in E$ and if $c:T$ or $c:=t:T$ is +declared in $E$, we write $(c : T) \in E$. 
+ +\paragraph{Typing rules.}\label{Typing-rules}\index{Typing rules} +In the following, we assume $E$ is a valid environment wrt to +inductive definitions. We define simultaneously two +judgments. The first one \WTEG{t}{T} means the term $t$ is well-typed +and has type $T$ in the environment $E$ and context $\Gamma$. The +second judgment \WFE{\Gamma} means that the environment $E$ is +well-formed and the context $\Gamma$ is a valid context in this +environment. It also means a third property which makes sure that any +constant in $E$ was defined in an environment which is included in +$\Gamma$ +\footnote{This requirement could be relaxed if we instead introduced + an explicit mechanism for instantiating constants. At the external + level, the Coq engine works accordingly to this view that all the + definitions in the environment were built in a sub-context of the + current context.}. + +A term $t$ is well typed in an environment $E$ iff there exists a +context $\Gamma$ and a term $T$ such that the judgment \WTEG{t}{T} can +be derived from the following rules. +\begin{description} +\item[W-E] \inference{\WF{[]}{[]}} +\item[W-S] % Ce n'est pas vrai : x peut apparaitre plusieurs fois dans Gamma +\inference{\frac{\WTEG{T}{s}~~~~s\in \Sort~~~~x \not\in + \Gamma % \cup E + } + {\WFE{\Gamma::(x:T)}}~~~~~ + \frac{\WTEG{t}{T}~~~~x \not\in + \Gamma % \cup E + }{\WFE{\Gamma::(x:=t:T)}}} +\item[Def] \inference{\frac{\WTEG{t}{T}~~~c \notin E\cup \Gamma} + {\WF{E;\Def{\Gamma}{c}{t}{T}}{\Gamma}}} +\item[Ax] \index{Typing rules!Ax} +\inference{\frac{\WFE{\Gamma}}{\WTEG{\Prop}{\Type(p)}}~~~~~ +\frac{\WFE{\Gamma}}{\WTEG{\Set}{\Type(q)}}} +\inference{\frac{\WFE{\Gamma}~~~~i nat. +Inductive list (A:Set) : Set := + | nil : list A + | cons : A -> list A -> list A. +\end{coq_example*} +\begin{coq_example*} +Inductive Length (A:Set) : list A -> nat -> Prop := + | Lnil : Length A (nil A) O + | Lcons : + forall (a:A) (l:list A) (n:nat), + Length A l n -> Length A (cons A a l) (S n). 
+Inductive tree : Set := + node : forest -> tree +with forest : Set := + | emptyf : forest + | consf : tree -> forest -> forest. +\end{coq_example*} +The inductive declaration in \Coq\ is slightly different from the one +we described theoretically. The difference is that in the type of +constructors the inductive definition is explicitly applied to the +parameters variables. The \Coq\ type-checker verifies that all +parameters are applied in the correct manner in each recursive call. +In particular, the following definition will not be accepted because +there is an occurrence of \List\ which is not applied to the parameter +variable: +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is not correct and should produce **********) +(********* Error: The 1st argument of list' must be A in ... *********) +\end{coq_eval} +\begin{coq_example} +Inductive list' (A:Set) : Set := + | nil' : list' A + | cons' : A -> list' (A -> A) -> list' A. +\end{coq_example} + +\subsection{Types of inductive objects} +We have to give the type of constants in an environment $E$ which +contains an inductive declaration. + +\begin{description} +\item[Ind-Const] Assuming $\Gamma_P$ is $[p_1:P_1;\ldots;p_r:P_r]$, + $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is + $[c_1:C_1;\ldots;c_n:C_n]$, + +\inference{\frac{\Ind{\Gamma}{\Gamma_P}{\Gamma_I}{\Gamma_C} \in E + ~~j=1\ldots k}{(I_j:\forall~p_1:P_1,\ldots\forall p_r:P_r,A_j) \in E}} + +\inference{\frac{\Ind{\Gamma}{\Gamma_P}{\Gamma_I}{\Gamma_C} \in E + ~~~~i=1.. n} + {(c_i:\forall~p_1:P_1,\ldots \forall p_r:P_r,\subst{C_i}{I_j}{(I_j~p_1\ldots + p_r)}_{j=1\ldots k})\in E}} +\end{description} + +\paragraph{Example.} +We have $(\List:\Set \ra \Set), (\cons:\forall~A:\Set,A\ra(\List~A)\ra +(\List~A))$, \\ +$(\Length:\forall~A:\Set, (\List~A)\ra\nat\ra\Prop)$, $\tree:\Set$ and $\forest:\Set$. + +From now on, we write $\ListA$ instead of $(\List~A)$ and $\LengthA$ +for $(\Length~A)$. 
+ +%\paragraph{Parameters.} +%%The parameters introduce a distortion between the inside specification +%%of the inductive declaration where parameters are supposed to be +%%instantiated (this representation is appropriate for checking the +%%correctness or deriving the destructor principle) and the outside +%%typing rules where the inductive objects are seen as objects +%%abstracted with respect to the parameters. + +%In the definition of \List\ or \Length\, $A$ is a parameter because +%what is effectively inductively defined is $\ListA$ or $\LengthA$ for +%a given $A$ which is constant in the type of constructors. But when +%we define $(\LengthA~l~n)$, $l$ and $n$ are not parameters because the +%constructors manipulate different instances of this family. + +\subsection{Well-formed inductive definitions} +We cannot accept any inductive declaration because some of them lead +to inconsistent systems. We restrict ourselves to definitions which +satisfy a syntactic criterion of positivity. Before giving the formal +rules, we need a few definitions: + +\paragraph{Definitions}\index{Positivity}\label{Positivity} + +A type $T$ is an {\em arity of sort $s$}\index{Arity} if it converts +to the sort $s$ or to a product $\forall~x:T,U$ with $U$ an arity +of sort $s$. (For instance $A\ra \Set$ or $\forall~A:\Prop,A\ra +\Prop$ are arities of sort respectively \Set\ and \Prop). A {\em type + of constructor of $I$}\index{Type of constructor} is either a term +$(I~t_1\ldots ~t_n)$ or $\fa x:T,C$ with $C$ a {\em type of constructor + of $I$}. 
+ +\smallskip + +The type of constructor $T$ will be said to {\em satisfy the positivity +condition} for a constant $X$ in the following cases: + +\begin{itemize} +\item $T=(X~t_1\ldots ~t_n)$ and $X$ does not occur free in +any $t_i$ +\item $T=\forall~x:U,V$ and $X$ occurs only strictly positively in $U$ and +the type $V$ satisfies the positivity condition for $X$ +\end{itemize} + +The constant $X$ {\em occurs strictly positively} in $T$ in the +following cases: + +\begin{itemize} +\item $X$ does not occur in $T$ +\item $T$ converts to $(X~t_1 \ldots ~t_n)$ and $X$ does not occur in + any of $t_i$ +\item $T$ converts to $\forall~x:U,V$ and $X$ does not occur in + type $U$ but occurs strictly positively in type $V$ +\item $T$ converts to $(I~a_1 \ldots ~a_m ~ t_1 \ldots ~t_p)$ where + $I$ is the name of an inductive declaration of the form + $\Ind{\Gamma}{p_1:P_1;\ldots;p_m:P_m}{I:A}{c_1:C_1;\ldots;c_n:C_n}$ + (in particular, it is not mutually defined and it has $m$ + parameters) and $X$ does not occur in any of the $t_i$, and the + types of constructor $C_i\{p_j/a_j\}_{j=1\ldots m}$ of $I$ satisfy + the nested positivity condition for $X$ +%\item more generally, when $T$ is not a type, $X$ occurs strictly +%positively in $T[x:U]u$ if $X$ does not occur in $U$ but occurs +%strictly positively in $u$ +\end{itemize} + +The type of constructor $T$ of $I$ {\em satisfies the nested +positivity condition} for a constant $X$ in the following +cases: + +\begin{itemize} +\item $T=(I~t_1\ldots ~t_n)$ and $X$ does not occur in +any $t_i$ +\item $T=\forall~x:U,V$ and $X$ occurs only strictly positively in $U$ and +the type $V$ satisfies the nested positivity condition for $X$ +\end{itemize} + +\paragraph{Example} + +$X$ occurs strictly positively in $A\ra X$ or $X*A$ or $({\tt list} +X)$ but not in $X \ra A$ or $(X \ra A)\ra A$ nor $({\tt neg}~A)$ +assuming the notion of product and lists were already defined and {\tt + neg} is an inductive definition with declaration 
\Ind{}{A:\Set}{{\tt + neg}:\Set}{{\tt neg}:(A\ra{\tt False}) \ra {\tt neg}}. Assuming +$X$ has arity ${\tt nat \ra Prop}$ and {\tt ex} is the inductively +defined existential quantifier, the occurrence of $X$ in ${\tt (ex~ + nat~ \lb~n:nat\mto (X~ n))}$ is also strictly positive. + +\paragraph{Correctness rules.} +We shall now describe the rules allowing the introduction of a new +inductive definition. + +\begin{description} +\item[W-Ind] Let $E$ be an environment and + $\Gamma,\Gamma_P,\Gamma_I,\Gamma_C$ are contexts such that + $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$ and $\Gamma_C$ is + $[c_1:C_1;\ldots;c_n:C_n]$. +\inference{ + \frac{ + (\WTE{\Gamma;\Gamma_P}{A_j}{s'_j})_{j=1\ldots k} + ~~ (\WTE{\Gamma;\Gamma_P;\Gamma_I}{C_i}{s_{p_i}})_{i=1\ldots n} +} + {\WF{E;\Ind{\Gamma}{\Gamma_P}{\Gamma_I}{\Gamma_C}}{\Gamma}}} +providing the following side conditions hold: +\begin{itemize} +\item $k>0$, $I_j$, $c_i$ are different names for $j=1\ldots k$ and $i=1\ldots n$, +\item for $j=1\ldots k$ we have $A_j$ is an arity of sort $s_j$ and $I_j + \notin \Gamma \cup E$, +\item for $i=1\ldots n$ we have $C_i$ is a type of constructor of + $I_{p_i}$ which satisfies the positivity condition for $I_1 \ldots I_k$ + and $c_i \notin \Gamma \cup E$. +\end{itemize} +\end{description} +One can remark that there is a constraint between the sort of the +arity of the inductive type and the sort of the type of its +constructors which will always be satisfied for the impredicative sort +(\Prop) but may fail to define inductive definition +on sort \Set{} and generate constraints between universes for +inductive definitions in types. + +\paragraph{Examples} +It is well known that existential quantifier can be encoded as an +inductive definition. +The following declaration introduces the second-order existential +quantifier $\exists X.P(X)$. +\begin{coq_example*} +Inductive exProp (P:Prop->Prop) : Prop + := exP_intro : forall X:Prop, P X -> exProp P. 
+\end{coq_example*} +The same definition on \Set{} is not allowed and fails~: +\begin{coq_eval} +(********** The following is not correct and should produce **********) +(*** Error: Large non-propositional inductive types must be in Type***) +\end{coq_eval} +\begin{coq_example} +Inductive exSet (P:Set->Prop) : Set + := exS_intro : forall X:Set, P X -> exSet P. +\end{coq_example} +It is possible to declare the same inductive definition in the +universe \Type. +The \texttt{exType} inductive definition has type $(\Type_i \ra\Prop)\ra +\Type_j$ with the constraint that the parameter \texttt{X} of \texttt{exT\_intro} has type $\Type_k$ with $kProp) : Type + := exT_intro : forall X:Type, P X -> exType P. +\end{coq_example*} +%We shall assume for the following definitions that, if necessary, we +%annotated the type of constructors such that we know if the argument +%is recursive or not. We shall write the type $(x:_R T)C$ if it is +%a recursive argument and $(x:_P T)C$ if the argument is not recursive. + +\subsection{Destructors} +The specification of inductive definitions with arities and +constructors is quite natural. But we still have to say how to use an +object in an inductive type. + +This problem is rather delicate. There are actually several different +ways to do that. Some of them are logically equivalent but not always +equivalent from the computational point of view or from the user point +of view. + +From the computational point of view, we want to be able to define a +function whose domain is an inductively defined type by using a +combination of case analysis over the possible constructors of the +object and recursion. + +Because we need to keep a consistent theory and also we prefer to keep +a strongly normalizing reduction, we cannot accept any sort of +recursion (even terminating). So the basic idea is to restrict +ourselves to primitive recursive functions and functionals. 
+ +For instance, assuming a parameter $A:\Set$ exists in the context, we +want to build a function \length\ of type $\ListA\ra \nat$ which +computes the length of the list, so such that $(\length~\Nil) = \nO$ +and $(\length~(\cons~A~a~l)) = (\nS~(\length~l))$. We want these +equalities to be recognized implicitly and taken into account in the +conversion rule. + +From the logical point of view, we have built a type family by giving +a set of constructors. We want to capture the fact that we do not +have any other way to build an object in this type. So when trying to +prove a property $(P~m)$ for $m$ in an inductive definition it is +enough to enumerate all the cases where $m$ starts with a different +constructor. + +In case the inductive definition is effectively a recursive one, we +want to capture the extra property that we have built the smallest +fixed point of this recursive equation. This says that we are only +manipulating finite objects. This analysis provides induction +principles. + +For instance, in order to prove $\forall l:\ListA,(\LengthA~l~(\length~l))$ +it is enough to prove: + +\noindent $(\LengthA~\Nil~(\length~\Nil))$ and + +\smallskip +$\forall a:A, \forall l:\ListA, (\LengthA~l~(\length~l)) \ra +(\LengthA~(\cons~A~a~l)~(\length~(\cons~A~a~l)))$. +\smallskip + +\noindent which given the conversion equalities satisfied by \length\ is the +same as proving: +$(\LengthA~\Nil~\nO)$ and $\forall a:A, \forall l:\ListA, +(\LengthA~l~(\length~l)) \ra +(\LengthA~(\cons~A~a~l)~(\nS~(\length~l)))$. + +One conceptually simple way to do that, following the basic scheme +proposed by Martin-L\"of in his Intuitionistic Type Theory, is to +introduce for each inductive definition an elimination operator. At +the logical level it is a proof of the usual induction principle and +at the computational level it implements a generic operator for doing +primitive recursion over the structure. + +But this operator is rather tedious to implement and use. 
We choose in
+this version of Coq to factorize the operator for primitive recursion
+into two more primitive operations as was first suggested by Th. Coquand
+in~\cite{Coq92}. One is the definition by pattern-matching. The second one is a definition by guarded fixpoints.
+
+\subsubsection{The {\tt match\ldots with \ldots end} construction.}
+\label{Caseexpr}
+\index{match@{\tt match\ldots with\ldots end}}
+
+The basic idea of this destructor operation is that we have an object
+$m$ in an inductive type $I$ and we want to prove a property $(P~m)$
+which in general depends on $m$. For this, it is enough to prove the
+property for $m = (c_i~u_1\ldots u_{p_i})$ for each constructor of $I$.
+
+The \Coq{} term for this proof will be written~:
+\[\kw{match}~m~\kw{with}~ (c_1~x_{11}~...~x_{1p_1}) \Ra f_1 ~|~\ldots~|~
+ (c_n~x_{n1}...x_{np_n}) \Ra f_n~ \kw{end}\]
+In this expression, if
+$m$ is a term built from a constructor $(c_i~u_1\ldots u_{p_i})$ then
+the expression will behave as specified in the $i$-th branch and
+will reduce to $f_i$ where the $x_{i1}$\ldots $x_{ip_i}$ are replaced
+by the $u_1\ldots u_{p_i}$ according to the $\iota$-reduction.
+
+Actually, for type-checking a \kw{match\ldots with\ldots end}
+expression we also need to know the predicate $P$ to be proved by case
+analysis. \Coq{} can sometimes infer this predicate but sometimes
+not. The concrete syntax for describing this predicate uses the
+\kw{as\ldots return} construction.
+The predicate is made explicit using the syntax~: +\[\kw{match}~m~\kw{as}~ x~ \kw{return}~ (P~ x) ~\kw{with}~ (c_1~x_{11}~...~x_{1p_1}) \Ra f_1 ~|~\ldots~|~ + (c_n~x_{n1}...x_{np_n}) \Ra f_n \kw{end}\] +For the purpose of presenting the inference rules, we use a more +compact notation~: +\[ \Case{(\lb x \mto P)}{m}{ \lb x_{11}~...~x_{1p_1} \mto f_1 ~|~\ldots~|~ + \lb x_{n1}...x_{np_n} \mto f_n}\] + +This is the basic idea which is generalized to the case where $I$ is +an inductively defined $n$-ary relation (in which case the property +$P$ to be proved will be a $n+1$-ary relation). + + +\paragraph{Non-dependent elimination.} +When defining a function by case analysis, we build an object of type $I +\ra C$ and the minimality principle on an inductively defined logical +predicate of type $A \ra \Prop$ is often used to prove a property +$\forall x:A,(I~x)\ra (C~x)$. This is a particular case of the dependent +principle that we stated before with a predicate which does not depend +explicitly on the object in the inductive definition. + +For instance, a function testing whether a list is empty +can be +defined as: + +\[\lb~l:\ListA \mto\Case{\bool}{l}{\Nil~ \Ra~\true~ |~ (\cons~a~m)~ \Ra~\false}\] +%\noindent {\bf Remark. } + +% In the system \Coq\ the expression above, can be +% written without mentioning +% the dummy abstraction: +% \Case{\bool}{l}{\Nil~ \mbox{\tt =>}~\true~ |~ (\cons~a~m)~ +% \mbox{\tt =>}~ \false} + +\paragraph{Allowed elimination sorts.} +\index{Elimination sorts} + +An important question for building the typing rule for \kw{match} is +what can be the type of $P$ with respect to the type of the inductive +definitions. + +We define now a relation \compat{I:A}{B} between an inductive +definition $I$ of type $A$, an arity $B$ which says that an object in +the inductive definition $I$ can be eliminated for proving a property +$P$ of type $B$. + +The case of inductive definitions in sorts \Set\ or \Type{} is simple. 
+There is no restriction on the sort of the predicate to be +eliminated. + +\paragraph{Notations.} +The \compat{I:A}{B} is defined as the smallest relation satisfying the +following rules: +We write \compat{I}{B} for \compat{I:A}{B} where $A$ is the type of +$I$. + +\begin{description} +\item[Prod] \inference{\frac{\compat{(I~x):A'}{B'}} + {\compat{I:(x:A)A'}{(x:A)B'}}} +\item[\Set \& \Type] \inference{\frac{ + s_1 \in \{\Set,\Type(j)\}, + s_2 \in \Sort}{\compat{I:s_1}{I\ra s_2}}} +\end{description} + +The case of Inductive Definitions of sort \Prop{} is a bit more +complicated, because of our interpretation of this sort. The only +harmless allowed elimination, is the one when predicate $P$ is also of +sort \Prop. +\begin{description} +\item[\Prop] \inference{\compat{I:\Prop}{I\ra\Prop}} +\end{description} +\Prop{} is the type of logical propositions, the proofs of properties +$P$ in \Prop{} could not be used for computation and are consequently +ignored by the extraction mechanism. +Assume $A$ and $B$ are two propositions, and the logical disjunction +$A\vee B$ is defined inductively by~: +\begin{coq_example*} +Inductive or (A B:Prop) : Prop := + lintro : A -> or A B | rintro : B -> or A B. +\end{coq_example*} +The following definition which computes a boolean value by case over +the proof of \texttt{or A B} is not accepted~: +\begin{coq_eval} +(***************************************************************) +(*** This example should fail with ``Incorrect elimination'' ***) +\end{coq_eval} +\begin{coq_example} +Definition choice (A B: Prop) (x:or A B) := + match x with lintro a => true | rintro b => false end. +\end{coq_example} +From the computational point of view, the structure of the proof of +\texttt{(or A B)} in this term is needed for computing the boolean +value. 
+ +In general, if $I$ has type \Prop\ then $P$ cannot have type $I\ra +\Set$, because it will mean to build an informative proof of type +$(P~m)$ doing a case analysis over a non-computational object that +will disappear in the extracted program. But the other way is safe +with respect to our interpretation we can have $I$ a computational +object and $P$ a non-computational one, it just corresponds to proving +a logical property of a computational object. + +% Also if $I$ is in one of the sorts \{\Prop, \Set\}, one cannot in +% general allow an elimination over a bigger sort such as \Type. But +% this operation is safe whenever $I$ is a {\em small inductive} type, +% which means that all the types of constructors of +% $I$ are small with the following definition:\\ +% $(I~t_1\ldots t_s)$ is a {\em small type of constructor} and +% $\forall~x:T,C$ is a small type of constructor if $C$ is and if $T$ +% has type \Prop\ or \Set. \index{Small inductive type} + +% We call this particular elimination which gives the possibility to +% compute a type by induction on the structure of a term, a {\em strong +% elimination}\index{Strong elimination}. + +In the same spirit, elimination on $P$ of type $I\ra +\Type$ cannot be allowed because it trivially implies the elimination +on $P$ of type $I\ra \Set$ by cumulativity. It also implies that there +is two proofs of the same property which are provably different, +contradicting the proof-irrelevance property which is sometimes a +useful axiom~: +\begin{coq_example} +Axiom proof_irrelevance : forall (P : Prop) (x y : P), x=y. +\end{coq_example} +\begin{coq_eval} +Reset proof_irrelevance. +\end{coq_eval} +The elimination of an inductive definition of type \Prop\ on a +predicate $P$ of type $I\ra \Type$ leads to a paradox when applied to +impredicative inductive definition like the second-order existential +quantifier \texttt{exProp} defined above, because it give access to +the two projections on this type. 
+ +%\paragraph{Warning: strong elimination} +%\index{Elimination!Strong elimination} +%In previous versions of Coq, for a small inductive definition, only the +%non-informative strong elimination on \Type\ was allowed, because +%strong elimination on \Typeset\ was not compatible with the current +%extraction procedure. In this version, strong elimination on \Typeset\ +%is accepted but a dummy element is extracted from it and may generate +%problems if extracted terms are explicitly used such as in the +%{\tt Program} tactic or when extracting ML programs. + +\paragraph{Empty and singleton elimination} +\index{Elimination!Singleton elimination} +\index{Elimination!Empty elimination} + +There are special inductive definitions in \Prop\ for which more +eliminations are allowed. +\begin{description} +\item[\Prop-extended] +\inference{ + \frac{I \mbox{~is an empty or singleton + definition}~~~s\in\Sort}{\compat{I:\Prop}{I\ra s}} +} +\end{description} + +% A {\em singleton definition} has always an informative content, +% even if it is a proposition. + +A {\em singleton +definition} has only one constructor and all the arguments of this +constructor have type \Prop. In that case, there is a canonical +way to interpret the informative extraction on an object in that type, +such that the elimination on any sort $s$ is legal. Typical examples are +the conjunction of non-informative propositions and the equality. +If there is an hypothesis $h:a=b$ in the context, it can be used for +rewriting not only in logical propositions but also in any type. +% In that case, the term \verb!eq_rec! which was defined as an axiom, is +% now a term of the calculus. +\begin{coq_example} +Print eq_rec. +Extraction eq_rec. +\end{coq_example} +An empty definition has no constructors, in that case also, +elimination on any sort is allowed. + +\paragraph{Type of branches.} +Let $c$ be a term of type $C$, we assume $C$ is a type of constructor +for an inductive definition $I$. 
Let $P$ be a term that represents the +property to be proved. +We assume $r$ is the number of parameters. + +We define a new type \CI{c:C}{P} which represents the type of the +branch corresponding to the $c:C$ constructor. +\[ +\begin{array}{ll} +\CI{c:(I_i~p_1\ldots p_r\ t_1 \ldots t_p)}{P} &\equiv (P~t_1\ldots ~t_p~c) \\[2mm] +\CI{c:\forall~x:T,C}{P} &\equiv \forall~x:T,\CI{(c~x):C}{P} +\end{array} +\] +We write \CI{c}{P} for \CI{c:C}{P} with $C$ the type of $c$. + +\paragraph{Examples.} +For $\ListA$ the type of $P$ will be $\ListA\ra s$ for $s \in \Sort$. \\ +$ \CI{(\cons~A)}{P} \equiv +\forall a:A, \forall l:\ListA,(P~(\cons~A~a~l))$. + +For $\LengthA$, the type of $P$ will be +$\forall l:\ListA,\forall n:\nat, (\LengthA~l~n)\ra \Prop$ and the expression +\CI{(\LCons~A)}{P} is defined as:\\ +$\forall a:A, \forall l:\ListA, \forall n:\nat, \forall +h:(\LengthA~l~n), (P~(\cons~A~a~l)~(\nS~n)~(\LCons~A~a~l~n~l))$.\\ +If $P$ does not depend on its third argument, we find the more natural +expression:\\ +$\forall a:A, \forall l:\ListA, \forall n:\nat, +(\LengthA~l~n)\ra(P~(\cons~A~a~l)~(\nS~n))$. 
+ +\paragraph{Typing rule.} + +Our very general destructor for inductive definition enjoys the +following typing rule +% , where we write +% \[ +% \Case{P}{c}{[x_{11}:T_{11}]\ldots[x_{1p_1}:T_{1p_1}]g_1\ldots +% [x_{n1}:T_{n1}]\ldots[x_{np_n}:T_{np_n}]g_n} +% \] +% for +% \[ +% \Case{P}{c}{(c_1~x_{11}~...~x_{1p_1}) \Ra g_1 ~|~\ldots~|~ +% (c_n~x_{n1}...x_{np_n}) \Ra g_n } +% \] + +\begin{description} +\item[match] \label{elimdep} \index{Typing rules!match} +\inference{ +\frac{\WTEG{c}{(I~q_1\ldots q_r~t_1\ldots t_s)}~~ + \WTEG{P}{B}~~\compat{(I~q_1\ldots q_r)}{B} + ~~ +(\WTEG{f_i}{\CI{(c_{p_i}~q_1\ldots q_r)}{P}})_{i=1\ldots l}} +{\WTEG{\Case{P}{c}{f_1\ldots f_l}}{(P\ t_1\ldots t_s\ c)}}}%\\[3mm] + +provided $I$ is an inductive type in a declaration +\Ind{\Delta}{\Gamma_P}{\Gamma_I}{\Gamma_C} with $|\Gamma_P| = r$, +$\Gamma_C = [c_1:C_1;\ldots;c_n:C_n]$ and $c_{p_1}\ldots c_{p_l}$ are the +only constructors of $I$. +\end{description} + +\paragraph{Example.} +For \List\ and \Length\ the typing rules for the {\tt match} expression +are (writing just $t:M$ instead of \WTEG{t}{M}, the environment and +context being the same in all the judgments). + +\[\frac{l:\ListA~~P:\ListA\ra s~~~f_1:(P~(\Nil~A))~~ + f_2:\forall a:A, \forall l:\ListA, (P~(\cons~A~a~l))} + {\Case{P}{l}{f_1~f_2}:(P~l)}\] + +\[\frac{ + \begin{array}[b]{@{}c@{}} +H:(\LengthA~L~N) \\ P:\forall l:\ListA, \forall n:\nat, (\LengthA~l~n)\ra + \Prop\\ + f_1:(P~(\Nil~A)~\nO~\LNil) \\ + f_2:\forall a:A, \forall l:\ListA, \forall n:\nat, \forall + h:(\LengthA~l~n), (P~(\cons~A~a~n)~(\nS~n)~(\LCons~A~a~l~n~h)) + \end{array}} + {\Case{P}{H}{f_1~f_2}:(P~L~N~H)}\] + +\paragraph{Definition of $\iota$-reduction.}\label{iotared} +\index{iota-reduction@$\iota$-reduction} +We still have to define the $\iota$-reduction in the general case. 
+ +A $\iota$-redex is a term of the following form: +\[\Case{P}{(c_{p_i}~q_1\ldots q_r~a_1\ldots a_m)}{f_1\ldots + f_l}\] +with $c_{p_i}$ the $i$-th constructor of the inductive type $I$ with $r$ +parameters. + +The $\iota$-contraction of this term is $(f_i~a_1\ldots a_m)$ leading +to the general reduction rule: +\[ \Case{P}{(c_{p_i}~q_1\ldots q_r~a_1\ldots a_m)}{f_1\ldots + f_n} \triangleright_{\iota} (f_i~a_1\ldots a_m) \] + +\subsection{Fixpoint definitions} +\label{Fix-term} \index{Fix@{\tt Fix}} +The second operator for elimination is fixpoint definition. +This fixpoint may involve several mutually recursive definitions. +The basic concrete syntax for a recursive set of mutually recursive +declarations is (with $\Gamma_i$ contexts)~: +\[\kw{fix}~f_1 (\Gamma_1) :A_1:=t_1~\kw{with} \ldots \kw{with}~ f_n +(\Gamma_n) :A_n:=t_n\] +The terms are obtained by projections from this set of declarations +and are written +\[\kw{fix}~f_1 (\Gamma_1) :A_1:=t_1~\kw{with} \ldots \kw{with}~ f_n +(\Gamma_n) :A_n:=t_n~\kw{for}~f_i\] +In the inference rules, we represent such a +term by +\[\Fix{f_i}{f_1:A_1':=t_1' \ldots f_n:A_n':=t_n'}\] +with $t_i'$ (resp. $A_i'$) representing the term $t_i$ abstracted +(resp. generalized) with +respect to the bindings in the context $\Gamma_i$, namely +$t_i'=\lb \Gamma_i \mto t_i$ and $A_i'=\forall \Gamma_i, A_i$. + +\subsubsection{Typing rule} +The typing rule is the expected one for a fixpoint. + +\begin{description} +\item[Fix] \index{Typing rules!Fix} +\inference{\frac{(\WTEG{A_i}{s_i})_{i=1\ldots n}~~~~ + (\WTE{\Gamma,f_1:A_1,\ldots,f_n:A_n}{t_i}{A_i})_{i=1\ldots n}} + {\WTEG{\Fix{f_i}{f_1:A_1:=t_1 \ldots f_n:A_n:=t_n}}{A_i}}} +\end{description} + +Any fixpoint definition cannot be accepted because non-normalizing terms +will lead to proofs of absurdity. + +The basic scheme of recursion that should be allowed is the one needed for +defining primitive +recursive functionals. 
In that case the fixpoint enjoys a special +syntactic restriction, namely one of the arguments belongs to an +inductive type, the function starts with a case analysis and recursive +calls are done on variables coming from patterns and representing subterms. + +For instance in the case of natural numbers, a proof of the induction +principle of type +\[\forall P:\nat\ra\Prop, (P~\nO)\ra((n:\nat)(P~n)\ra(P~(\nS~n)))\ra +\forall n:\nat, (P~n)\] +can be represented by the term: +\[\begin{array}{l} +\lb P:\nat\ra\Prop\mto\lb f:(P~\nO)\mto \lb g:(\forall n:\nat, +(P~n)\ra(P~(\nS~n))) \mto\\ +\Fix{h}{h:\forall n:\nat, (P~n):=\lb n:\nat\mto \Case{P}{n}{f~\lb + p:\nat\mto (g~p~(h~p))}} +\end{array} +\] + +Before accepting a fixpoint definition as being correctly typed, we +check that the definition is ``guarded''. A precise analysis of this +notion can be found in~\cite{Gim94}. + +The first stage is to precise on which argument the fixpoint will be +decreasing. The type of this argument should be an inductive +definition. + +For doing this the syntax of fixpoints is extended and becomes + \[\Fix{f_i}{f_1/k_1:A_1:=t_1 \ldots f_n/k_n:A_n:=t_n}\] +where $k_i$ are positive integers. +Each $A_i$ should be a type (reducible to a term) starting with at least +$k_i$ products $\forall y_1:B_1,\ldots \forall y_{k_i}:B_{k_i}, A'_i$ +and $B_{k_i}$ +being an instance of an inductive definition. + +Now in the definition $t_i$, if $f_j$ occurs then it should be applied +to at least $k_j$ arguments and the $k_j$-th argument should be +syntactically recognized as structurally smaller than $y_{k_i}$ + + +The definition of being structurally smaller is a bit technical. +One needs first to define the notion of +{\em recursive arguments of a constructor}\index{Recursive arguments}. 
+For an inductive definition \Ind{\Gamma}{\Gamma_P}{\Gamma_I}{\Gamma_C}, +the type of a constructor $c$ have the form +$\forall p_1:P_1,\ldots \forall p_r:P_r, +\forall x_1:T_1, \ldots \forall x_r:T_r, (I_j~p_1\ldots +p_r~t_1\ldots t_s)$ the recursive arguments will correspond to $T_i$ in +which one of the $I_l$ occurs. + + +The main rules for being structurally smaller are the following:\\ +Given a variable $y$ of type an inductive +definition in a declaration +\Ind{\Gamma}{\Gamma_P}{\Gamma_I}{\Gamma_C} +where $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is + $[c_1:C_1;\ldots;c_n:C_n]$. +The terms structurally smaller than $y$ are: +\begin{itemize} +\item $(t~u), \lb x:u \mto t$ when $t$ is structurally smaller than $y$ . +\item \Case{P}{c}{f_1\ldots f_n} when each $f_i$ is structurally + smaller than $y$. \\ + If $c$ is $y$ or is structurally smaller than $y$, its type is an inductive + definition $I_p$ part of the inductive + declaration corresponding to $y$. + Each $f_i$ corresponds to a type of constructor $C_q \equiv + \forall y_1:B_1, \ldots \forall y_k:B_k, (I~a_1\ldots a_k)$ + and can consequently be + written $\lb y_1:B'_1\mto \ldots \lb y_k:B'_k\mto g_i$. + ($B'_i$ is obtained from $B_i$ by substituting parameters variables) + the variables $y_j$ occurring + in $g_i$ corresponding to recursive arguments $B_i$ (the ones in + which one of the $I_l$ occurs) are structurally smaller than $y$. +\end{itemize} +The following definitions are correct, we enter them using the +{\tt Fixpoint} command as described in section~\ref{Fixpoint} and show +the internal representation. +\begin{coq_example} +Fixpoint plus (n m:nat) {struct n} : nat := + match n with + | O => m + | S p => S (plus p m) + end. +Print plus. +Fixpoint lgth (A:Set) (l:list A) {struct l} : nat := + match l with + | nil => O + | cons a l' => S (lgth A l') + end. +Print lgth. 
+Fixpoint sizet (t:tree) : nat := let (f) := t in S (sizef f) + with sizef (f:forest) : nat := + match f with + | emptyf => O + | consf t f => plus (sizet t) (sizef f) + end. +Print sizet. +\end{coq_example} + + +\subsubsection{Reduction rule} +\index{iota-reduction@$\iota$-reduction} +Let $F$ be the set of declarations: $f_1/k_1:A_1:=t_1 \ldots +f_n/k_n:A_n:=t_n$. +The reduction for fixpoints is: +\[ (\Fix{f_i}{F}~a_1\ldots +a_{k_i}) \triangleright_{\iota} \substs{t_i}{f_k}{\Fix{f_k}{F}}{k=1\ldots n}\] +when $a_{k_i}$ starts with a constructor. +This last restriction is needed in order to keep strong normalization +and corresponds to the reduction for primitive recursive operators. + +We can illustrate this behavior on examples. +\begin{coq_example} +Goal forall n m:nat, plus (S n) m = S (plus n m). +reflexivity. +Abort. +Goal forall f:forest, sizet (node f) = S (sizef f). +reflexivity. +Abort. +\end{coq_example} +But assuming the definition of a son function from \tree\ to \forest: +\begin{coq_example} +Definition sont (t:tree) : forest + := let (f) := t in f. +\end{coq_example} +The following is not a conversion but can be proved after a case analysis. +\begin{coq_eval} +(******************************************************************) +(** Error: Impossible to unify .... **) +\end{coq_eval} +\begin{coq_example} +Goal forall t:tree, sizet t = S (sizef (sont t)). +reflexivity. (** this one fails **) +destruct t. +reflexivity. +\end{coq_example} +\begin{coq_eval} +Abort. 
+\end{coq_eval} + +% La disparition de Program devrait rendre la construction Match obsolete +% \subsubsection{The {\tt Match \ldots with \ldots end} expression} +% \label{Matchexpr} +% %\paragraph{A unary {\tt Match\ldots with \ldots end}.} +% \index{Match...with...end@{\tt Match \ldots with \ldots end}} +% The {\tt Match} operator which was a primitive notion in older +% presentations of the Calculus of Inductive Constructions is now just a +% macro definition which generates the good combination of {\tt Case} +% and {\tt Fix} operators in order to generate an operator for primitive +% recursive definitions. It always considers an inductive definition as +% a single inductive definition. + +% The following examples illustrates this feature. +% \begin{coq_example} +% Definition nat_pr : (C:Set)C->(nat->C->C)->nat->C +% :=[C,x,g,n]Match n with x g end. +% Print nat_pr. +% \end{coq_example} +% \begin{coq_example} +% Definition forest_pr +% : (C:Set)C->(tree->forest->C->C)->forest->C +% := [C,x,g,n]Match n with x g end. +% \end{coq_example} + +% Cet exemple faisait error (HH le 12/12/96), j'ai change pour une +% version plus simple +%\begin{coq_example} +%Definition forest_pr +% : (P:forest->Set)(P emptyf)->((t:tree)(f:forest)(P f)->(P (consf t f))) +% ->(f:forest)(P f) +% := [C,x,g,n]Match n with x g end. +%\end{coq_example} + +\subsubsection{Mutual induction} + +The principles of mutual induction can be automatically generated +using the {\tt Scheme} command described in section~\ref{Scheme}. + +\section{Coinductive types} +The implementation contains also coinductive definitions, which are +types inhabited by infinite objects. +More information on coinductive definitions can be found +in~\cite{Gimenez95b,Gim98}. +%They are described inchapter~\ref{Coinductives}. 
+ +\section{\iCIC : the Calculus of Inductive Construction with + impredicative \Set}\label{impredicativity} + +\Coq{} can be used as a type-checker for \iCIC{}, the original +Calculus of Inductive Constructions with an impredicative sort \Set{} +by using the compiler option \texttt{-impredicative-set}. + +For example, using the ordinary \texttt{coqtop} command, the following +is rejected. +\begin{coq_eval} +(** This example should fail ******************************* + Error: The term forall X:Set, X -> X has type Type + while it is expected to have type Set +***) +\end{coq_eval} +\begin{coq_example} +Definition id: Set := forall X:Set,X->X. +\end{coq_example} +while it will type-check, if one use instead the \texttt{coqtop + -impredicative-set} command. + +The major change in the theory concerns the rule for product formation +in the sort \Set, which is extended to a domain in any sort~: +\begin{description} +\item [Prod] \index{Typing rules!Prod (impredicative Set)} +\inference{\frac{\WTEG{T}{s}~~~~s\in\Sort~~~~~~ + \WTE{\Gamma::(x:T)}{U}{\Set}} + { \WTEG{\forall~x:T,U}{\Set}}} +\end{description} +This extension has consequences on the inductive definitions which are +allowed. +In the impredicative system, one can build so-called {\em large inductive + definitions} like the example of second-order existential +quantifier (\texttt{exSet}). + +There should be restrictions on the eliminations which can be +performed on such definitions. 
The eliminations rules in the +impredicative system for sort \Set{} become~: +\begin{description} +\item[\Set] \inference{\frac{s \in + \{\Prop, \Set\}}{\compat{I:\Set}{I\ra s}} +~~~~\frac{I \mbox{~is a small inductive definition}~~~~s \in + \{\Type(i)\}} + {\compat{I:\Set}{I\ra s}}} +\end{description} + + + +% $Id: RefMan-cic.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: + + diff --git a/doc/refman/RefMan-coi.tex b/doc/refman/RefMan-coi.tex new file mode 100644 index 00000000..a7b57ca3 --- /dev/null +++ b/doc/refman/RefMan-coi.tex @@ -0,0 +1,406 @@ +%\documentstyle[11pt,../tools/coq-tex/coq]{article} +%\input{title} + +%\include{macros} +%\begin{document} + +%\coverpage{Co-inductive types in Coq}{Eduardo Gim\'enez} +\chapter{Co-inductive types in Coq}\label{Coinductives} + +%\begin{abstract} +{\it Co-inductive} types are types whose elements may not be well-founded. +A formal study of the Calculus of Constructions extended by +co-inductive types has been presented +in \cite{Gim94}. It is based on the notion of +{\it guarded definitions} introduced by Th. Coquand +in \cite{Coquand93}. The implementation is by E. Gim\'enez. +%\end{abstract} + +\section{A short introduction to co-inductive types} + +We assume that the reader is rather familiar with inductive types. +These types are characterized by their {\it constructors}, which can be +regarded as the basic methods from which the elements +of the type can be built up. It is implicit in the definition +of an inductive type that +its elements are the result of a {\it finite} number of +applications of its constructors. Co-inductive types arise from +relaxing this implicit condition and admitting that an element of +the type can also be introduced by a non-ending (but effective) process +of construction defined in terms of the basic methods which characterize the +type. 
So we could think in the wider notion of types defined by
+constructors (let us call them {\it recursive types}) and classify
+them into inductive and co-inductive ones, depending on whether or not
+we consider non-ending methods as admissible for constructing elements
+of the type. Note that in both cases we obtain a ``closed type'', all of whose
+elements are pre-determined in advance (by the constructors). When we
+know that $a$ is an element of a recursive type (no matter if it is
+inductive or co-inductive) what we know is that it is the result of applying
+one of the basic forms of construction allowed for the type.
+So the most primitive way of eliminating an element of a recursive type is
+by case analysis, i.e. by considering through which constructor it could have
+been introduced. In the case of inductive sets, the additional knowledge that
+constructors can be applied only a finite number of times provides
+us with a more powerful way of eliminating their elements, say,
+the principle of
+induction. This principle is obviously not valid for co-inductive types,
+since it is just the expression of this extra knowledge attached to inductive
+types.
+
+
+An example of a co-inductive type is the type of infinite sequences formed with
+elements of type $A$, or streams for short. In Coq,
+it can be introduced using the \verb!CoInductive! command~:
+\begin{coq_example}
+CoInductive Stream (A:Set) : Set :=
+ cons : A -> Stream A -> Stream A.
+\end{coq_example}
+
+The syntax of this command is the same as the
+command \verb!Inductive! (cf. section
+\ref{gal_Inductive_Definitions}).
+Definitions of mutually co-inductive types are possible.
+
+As was already said, there are no principles of
+induction for co-inductive sets; the only way of eliminating these
+elements is by case analysis.
+In the example of streams, this elimination principle can be +used for instance to define the well known +destructors on streams $\hd : (\Str\;A)\rightarrow A$ +and $\tl: (\Str\;A)\rightarrow (\Str\;A)$ : +\begin{coq_example} +Section Destructors. +Variable A : Set. +Definition hd (x:Stream A) := match x with + | cons a s => a + end. +Definition tl (x:Stream A) := match x with + | cons a s => s + end. +\end{coq_example} +\begin{coq_example*} +End Destructors. +\end{coq_example*} + +\subsection{Non-ending methods of construction} + +At this point the reader should have realized that we have left unexplained +what is a ``non-ending but effective process of +construction'' of a stream. In the widest sense, a +method is a non-ending process of construction if we can eliminate the +stream that it introduces, in other words, if we can reduce +any case analysis on it. In this sense, the following ways of +introducing a stream are not acceptable. +\begin{center} +$\zeros = (\cons\;\nat\;\nO\;(\tl\;\zeros))\;\;:\;\;(\Str\;\nat)$\\[12pt] +$\filter\;(\cons\;A\;a\;s) = \si\;\;(P\;a)\;\;\alors\;\;(\cons\;A\;a\;(\filter\;s))\;\;\sinon\;\;(\filter\;s) )\;\;:\;\;(\Str\;A)$ +\end{center} +\noindent The former it is not valid since the stream can not be eliminated +to obtain its tail. In the latter, a stream is naively defined as +the result of erasing from another (arbitrary) stream +all the elements which does not verify a certain property $P$. This +does not always makes sense, for example it does not when all the elements +of the stream verify $P$, in which case we can not eliminate it to +obtain its head\footnote{Note that there is no notion of ``the empty +stream'', a stream is always infinite and build by a \texttt{cons}.}. 
+On the contrary, the following definitions are acceptable methods for +constructing a stream~: +\begin{center} +$\zeros = (\cons\;\nat\;\nO\;\zeros)\;\;:\;\;(\Str\;\nat)\;\;\;(*)$\\[12pt] +$(\from\;n) = (\cons\;\nat\;n\;(\from\;(\nS\;n)))\;:\;(\Str\;\nat)$\\[12pt] +$\alter = (\cons\;\bool\;\true\;(\cons\;\bool\;\false\;\alter))\;:\;(\Str\;\bool)$. +\end{center} +\noindent The first one introduces a stream containing all the natural numbers +greater than a given one, and the second the stream which infinitely +alternates the booleans true and false. + +In general it is not evident to realise when a definition can +be accepted or not. However, there is a class of definitions that +can be easily recognised as being valid : those +where (1) all the recursive calls of the method are done +after having explicitly mentioned which is (at least) the first constructor +to start building the element, and (2) no other +functions apart from constructors are applied to recursive calls. +This class of definitions is usually +referred as {\it guarded-by-constructors} +definitions \cite{Coquand93,Gim94}. +The methods $\from$ +and $\alter$ are examples of definitions which are guarded by constructors. +The definition of function $\filter$ is not, because there is no +constructor to guard +the recursive call in the {\it else} branch. Neither is the one of +$\zeros$, since there is function applied to the recursive call +which is not a constructor. However, there is a difference between +the definition of $\zeros$ and $\filter$. The former may be seen as a +wrong way of characterising an object which makes sense, and it can +be reformulated in an admissible way using the equation (*). On the contrary, +the definition of +$\filter$ can not be patched, since is the idea itself +of traversing an infinite +construction searching for an element whose existence is not ensured +which does not make sense. 
+ + + +Guarded definitions are exactly the kind of non-ending process of +construction which are allowed in Coq. The way of introducing +a guarded definition in Coq is using the special command +{\tt CoFixpoint}. This command verifies that the definition introduces an +element of a co-inductive type, and checks if it is guarded by constructors. +If we try to +introduce the definitions above, $\from$ and $\alter$ will be accepted, +while $\zeros$ and $\filter$ will be rejected giving some explanation +about why. +\begin{coq_example} +CoFixpoint zeros : Stream nat := cons nat 0%N (tl nat zeros). +CoFixpoint zeros : Stream nat := cons nat 0%N zeros. +CoFixpoint from (n:nat) : Stream nat := cons nat n (from (S n)). +\end{coq_example} + +As in the \verb!Fixpoint! command (cf. section~\ref{Fixpoint}), it is possible +to introduce a block of mutually dependent methods. The general syntax +for this case is : + +{\tt CoFixpoint {\ident$_1$} :{\term$_1$} := {\term$_1'$}\\ + with\\ + \mbox{}\hspace{0.1cm} $\ldots$ \\ + with {\ident$_m$} : {\term$_m$} := {\term$_m'$}} + + +\subsection{Non-ending methods and reduction} + +The elimination of a stream introduced by a \verb!CoFixpoint! definition +is done lazily, i.e. its definition can be expanded only when it occurs +at the head of an application which is the argument of a case expression. +Isolately it is considered as a canonical expression which +is completely evaluated. We can test this using the command \verb!compute! +to calculate the normal forms of some terms~: +\begin{coq_example} +Eval compute in (from 0). +Eval compute in (hd nat (from 0)). +Eval compute in (tl nat (from 0)). +\end{coq_example} +\noindent Thus, the equality +$(\from\;n)\equiv(\cons\;\nat\;n\;(\from \; (\S\;n)))$ +does not hold as definitional one. Nevertheless, it can be proved +as a propositional equality, in the sense of Leibniz's equality. 
+The version {\it à la Leibniz} of the equality above follows from
+a general lemma stating that eliminating and then re-introducing a stream
+yields the same stream.
+\begin{coq_example}
+Lemma unfold_Stream :
+ forall x:Stream nat, x = match x with
+                          | cons a s => cons nat a s
+                          end.
+\end{coq_example}
+
+\noindent The proof is immediate from the analysis of
+the possible cases for $x$, which transforms
+the equality into a trivial one.
+
+\begin{coq_example}
+destruct x.
+trivial.
+\end{coq_example}
+\begin{coq_eval}
+Qed.
+\end{coq_eval}
+The application of this lemma to $(\from\;n)$ puts this
+constant at the head of an application which is an argument
+of a case analysis, forcing its expansion.
+We can test the type of this application using Coq's command \verb!Check!,
+which infers the type of a given term.
+\begin{coq_example}
+Check (fun n:nat => unfold_Stream (from n)).
+\end{coq_example}
+\noindent Actually, the elimination of $(\from\;n)$ has
+no effect, because it is followed by a re-introduction,
+so the type of this application is in fact
+definitionally equal to the
+desired proposition. We can test this by computing
+the normal form of the application above to see its type.
+\begin{coq_example}
+Transparent unfold_Stream.
+Eval compute in (fun n:nat => unfold_Stream (from n)).
+\end{coq_example}
+
+
+\section{Reasoning about infinite objects}
+
+At first sight, it might seem that
+case analysis does not provide a very powerful way
+of reasoning about infinite objects. In fact, what we can prove about
+an infinite object using
+only case analysis is just what we can prove by unfolding its method
+of construction a finite number of times, which is not always
+enough. Consider for example the following method for appending
+two streams~:
+\begin{coq_example}
+Variable A : Set.
+CoFixpoint conc (s1 s2:Stream A) : Stream A :=
+  cons A (hd A s1) (conc (tl A s1) s2).
+\end{coq_example}
+
+Informally speaking, we expect that for every pair of streams $s_1$ and $s_2$,
+$(\conc\;s_1\;s_2)$
+defines ``the same'' stream as $s_1$,
+in the sense that if we were able to unfold the definition
+``up to the infinite'', we would obtain definitionally equal normal forms.
+However, no finite unfolding of the definitions gives definitionally
+equal terms. Their equality cannot be proved just using case analysis.
+
+
+The weakness of the elimination principle proposed for infinite objects
+contrasts with the power provided by the inductive
+elimination principles, but it is not actually surprising. It just means
+that we cannot expect to prove very interesting things about infinite
+objects using only finite proofs. To take advantage of infinite objects we
+have to consider infinite proofs as well. For example,
+if we want to capture the equality between $(\conc\;s_1\;s_2)$ and
+$s_1$ we first have to introduce the type of the infinite proofs
+of equality between streams. This is a
+co-inductive type, whose elements are built up from a
+unique constructor, requiring a proof of the equality of the
+heads of the streams, and an (infinite) proof of the equality
+of their tails.
+
+\begin{coq_example}
+CoInductive EqSt : Stream A -> Stream A -> Prop :=
+    eqst :
+      forall s1 s2:Stream A,
+        hd A s1 = hd A s2 -> EqSt (tl A s1) (tl A s2) -> EqSt s1 s2.
+\end{coq_example}
+\noindent Now the equality of both streams can be proved by introducing
+an infinite object of type
+
+\noindent $(\EqSt\;s_1\;(\conc\;s_1\;s_2))$ by a \verb!CoFixpoint!
+definition.
+\begin{coq_example}
+CoFixpoint eqproof (s1 s2:Stream A) : EqSt s1 (conc s1 s2) :=
+  eqst s1 (conc s1 s2) (refl_equal (hd A (conc s1 s2)))
+    (eqproof (tl A s1) s2).
+\end{coq_example}
+\begin{coq_eval}
+Reset eqproof.
+\end{coq_eval}
+\noindent Instead of giving an explicit definition,
+we can use the proof editor of Coq to help us in
+the construction of the proof.
+A tactic \verb!Cofix!
allows to place a \verb!CoFixpoint! definition +inside a proof. +This tactic introduces a variable in the context which has +the same type as the current goal, and its application stands +for a recursive call in the construction of the proof. If no name is +specified for this variable, the name of the lemma is chosen by +default. +%\pagebreak + +\begin{coq_example} +Lemma eqproof : forall s1 s2:Stream A, EqSt s1 (conc s1 s2). +cofix. +\end{coq_example} + +\noindent An easy (and wrong!) way of finishing the proof is just to apply the +variable \verb!eqproof!, which has the same type as the goal. + +\begin{coq_example} +intros. +apply eqproof. +\end{coq_example} + +\noindent The ``proof'' constructed in this way +would correspond to the \verb!CoFixpoint! definition +\begin{coq_example*} +CoFixpoint eqproof : forall s1 s2:Stream A, EqSt s1 (conc s1 s2) := + eqproof. +\end{coq_example*} + +\noindent which is obviously non-guarded. This means that +we can use the proof editor to +define a method of construction which does not make sense. However, +the system will never accept to include it as part of the theory, +because the guard condition is always verified before saving the proof. + +\begin{coq_example} +Qed. +\end{coq_example} + +\noindent Thus, the user must be careful in the +construction of infinite proofs +with the tactic \verb!Cofix!. Remark that once it has been used +the application of tactics performing automatic proof search in +the environment (like for example \verb!Auto!) +could introduce unguarded recursive calls in the proof. +The command \verb!Guarded! allows to verify +if the guarded condition has been violated +during the construction of the proof. This command can be +applied even if the proof term is not complete. + + + +\begin{coq_example} +Restart. +cofix. +auto. +Guarded. +Undo. +Guarded. 
+\end{coq_example} + +\noindent To finish with this example, let us restart from the +beginning and show how to construct an admissible proof~: + +\begin{coq_example} +Restart. + cofix. +\end{coq_example} + +%\pagebreak + +\begin{coq_example} +intros. +apply eqst. +trivial. +simpl. +apply eqproof. +Qed. +\end{coq_example} + + +\section{Experiments with co-inductive types} + +Some examples involving co-inductive types are available with +the distributed system, in the theories library and in the contributions +of the Lyon site. Here we present a short description of their contents~: +\begin{itemize} +\item Directory \verb!theories/LISTS! : + \begin{itemize} + \item File \verb!Streams.v! : The type of streams and the +extensional equality between streams. + \end{itemize} + +\item Directory \verb!contrib/Lyon/COINDUCTIVES! : + \begin{itemize} + \item Directory \verb!ARITH! : An arithmetic where $\infty$ +is an explicit constant of the language instead of a metatheoretical notion. + \item Directory \verb!STREAM! : + \begin{itemize} + \item File \verb!Examples! : +Several examples of guarded definitions, as well as +of frequent errors in the introduction of a stream. A different +way of defining the extensional equality of two streams, +and the proofs showing that it is equivalent to the one in \verb!theories!. + \item File \verb!Alter.v! : An example showing how +an infinite proof introduced by a guarded definition can be also described +using an operator of co-recursion \cite{Gimenez95b}. + \end{itemize} +\item Directory \verb!PROCESSES! : A proof of the alternating +bit protocol based on Pra\-sad's Calculus of Broadcasting Systems \cite{Prasad93}, +and the verification of an interpreter for this calculus. +See \cite{Gimenez95b} for a complete description about this development. 
+ \end{itemize}
+\end{itemize}
+
+%\end{document}
+
+% $Id: RefMan-coi.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $
diff --git a/doc/refman/RefMan-com.tex b/doc/refman/RefMan-com.tex
new file mode 100644
index 00000000..f8a7546f
--- /dev/null
+++ b/doc/refman/RefMan-com.tex
@@ -0,0 +1,280 @@
+\chapter{The \Coq~commands}\label{Addoc-coqc}
+\ttindex{coqtop}
+\ttindex{coqc}
+
+There are two \Coq~commands:
+\begin{itemize}
+\item {\tt coqtop}: The \Coq\ toplevel (interactive mode);
+\item {\tt coqc}: The \Coq\ compiler (batch compilation).
+\end{itemize}
+The options are (basically) the same for the two commands, and are
+roughly described below. You can also look at the \verb!man! pages of
+\verb!coqtop! and \verb!coqc! for more details.
+
+
+\section{Interactive use ({\tt coqtop})}
+
+In the interactive mode, also known as the \Coq~toplevel, the user can
+develop his theories and proofs step by step. The \Coq~toplevel is
+run by the command {\tt coqtop}.
+
+\index{byte-code}
+\index{native code}
+\label{binary-images}
+There are two different binary images of \Coq: the byte-code one and
+the native-code one (if Objective Caml provides a native-code compiler
+for your platform, which is assumed in the following). When invoking
+\verb!coqtop! or \verb!coqc!, the native-code version of the system is
+used. The command-line options \verb!-byte! and \verb!-opt! explicitly
+select the byte-code and the native-code versions, respectively.
+
+The byte-code toplevel is based on a Caml
+toplevel (to allow the dynamic link of tactics). You can switch to
+the Caml toplevel with the command \verb!Drop.!, and come back to the
+\Coq~toplevel with the command \verb!Toplevel.loop();;!.
+
+% The command \verb!coqtop -searchisos! runs the search tool {\sf
+% Coq\_SearchIsos} (see section~\ref{coqsearchisos},
+% page~\pageref{coqsearchisos}) and, as the \Coq~system, can be combined
+% with the option \verb!-opt!.
+
+\section{Batch compilation ({\tt coqc})}
+The {\tt coqc} command takes a name {\em file} as argument. Then it
+looks for a vernacular file named {\em file}{\tt .v}, and tries to
+compile it into a {\em file}{\tt .vo} file (see~\ref{compiled}).
+
+\Warning The name {\em file} must be a regular {\Coq} identifier, as
+defined in section \ref{lexical}. It
+must only contain letters, digits or underscores
+(\_). Thus it can be \verb+/bar/foo/toto.v+ but cannot be
+\verb+/bar/foo/to-to.v+.
+
+Notice that the \verb!-byte! and \verb!-opt! options are still
+available with \verb!coqc! and allow you to select the byte-code or
+native-code versions of the system.
+
+
+\section{Resource file}
+\index{Resource file}
+
+When \Coq\ is launched, with either {\tt coqtop} or {\tt coqc}, the
+resource file \verb:$HOME/.coqrc.7.0: is loaded, where \verb:$HOME: is
+the home directory of the user. If this file is not found, then the
+file \verb:$HOME/.coqrc: is searched. You can also specify an
+arbitrary name for the resource file (see option \verb:-init-file:
+below), or the name of another user to load the resource file of
+someone else (see option \verb:-user:).
+
+This file may contain, for instance, \verb:Add LoadPath: commands to add
+directories to the load path of \Coq.
+It is possible to skip the loading of the resource file with the
+option \verb:-q:.
+
+\section{Environment variables}
+\label{EnvVariables}
+\index{Environment variables}
+
+There are three environment variables used by the \Coq\ system.
+\verb:$COQBIN: for the directory where the binaries are,
+\verb:$COQLIB: for the directory where the standard library is, and
+\verb:$COQTOP: for the directory of the sources. The latter is useful
+only for developers that are writing their own tactics and are using
+\texttt{coq\_makefile} (see \ref{Makefile}). If \verb:$COQBIN: or
+\verb:$COQLIB: are not defined, \Coq\ will use the default values
+(defined at installation time).
So these variables are useful only if +you move the \Coq\ binaries and library after installation. + +\section{Options} +\index{Options of the command line} + +The following command-line options are recognized by the commands {\tt + coqc} and {\tt coqtop}, unless stated otherwise: + +\begin{description} +\item[{\tt -byte}]\ + + Run the byte-code version of \Coq{}. + +\item[{\tt -opt}]\ + + Run the native-code version of \Coq{}. + +\item[{\tt -I} {\em directory}, {\tt -include} {\em directory}]\ + + Add {\em directory} to the searched directories when looking for a + file. + +\item[{\tt -R} {\em directory} {\dirpath}]\ + + This maps the subdirectory structure of physical {\em directory} to + logical {\dirpath} and adds {\em directory} and its subdirectories + to the searched directories when looking for a file. + +\item[{\tt -top} {\dirpath}]\ + + This sets the toplevel module name to {\dirpath} instead of {\tt + Top}. Not valid for {\tt coqc}. + +\item[{\tt -is} {\em file}, {\tt -inputstate} {\em file}]\ + + Cause \Coq~to use the state put in the file {\em file} as its input + state. The default state is {\em initial.coq}. + Mainly useful to build the standard input state. + +\item[{\tt -outputstate} {\em file}]\ + + Cause \Coq~to dump its state to file {\em file}.coq just after finishing + parsing and evaluating all the arguments from the command line. + +\item[{\tt -nois}]\ + + Cause \Coq~to begin with an empty state. Mainly useful to build the + standard input state. + +%Obsolete? +% +%\item[{\tt -notactics}]\ +% +% Forbid the dynamic loading of tactics in the bytecode version of {\Coq}. + +\item[{\tt -init-file} {\em file}]\ + + Take {\em file} as the resource file. + +\item[{\tt -q}]\ + + Cause \Coq~not to load the resource file. + +\item[{\tt -user} {\em username}]\ + + Take resource file of user {\em username} (that is + \verb+~+{\em username}{\tt /.coqrc.7.0}) instead of yours. 
+
+\item[{\tt -load-ml-source} {\em file}]\ 
+
+  Load the Caml source file {\em file}.
+
+\item[{\tt -load-ml-object} {\em file}]\ 
+
+  Load the Caml object file {\em file}.
+
+\item[{\tt -l} {\em file}, {\tt -load-vernac-source} {\em file}]\ 
+
+  Load \Coq~file {\em file}{\tt .v}
+
+\item[{\tt -lv} {\em file}, {\tt -load-vernac-source-verbose} {\em file}]\ 
+
+  Load \Coq~file {\em file}{\tt .v} with
+  a copy of the contents of the file on standard output.
+
+\item[{\tt -load-vernac-object} {\em file}]\ 
+
+  Load \Coq~compiled file {\em file}{\tt .vo}
+
+%\item[{\tt -preload} {\em file}]\ \\
+%Add {\em file}{\tt .vo} to the files to be loaded and opened
+%before making the initial state.
+%
+\item[{\tt -require} {\em file}]\ 
+
+  Load \Coq~compiled file {\em file}{\tt .vo} and import it ({\tt
+  Require} {\em file}).
+
+\item[{\tt -compile} {\em file}]\ 
+
+  This compiles file {\em file}{\tt .v} into {\em file}{\tt .vo}.
+  This option implies options {\tt -batch} and {\tt -silent}. It is
+  only available for {\tt coqtop}.
+
+\item[{\tt -compile-verbose} {\em file}]\ 
+
+  This compiles file {\em file}{\tt .v} into {\em file}{\tt .vo} with
+  a copy of the contents of the file on standard output.
+  This option implies options {\tt -batch} and {\tt -silent}. It is
+  only available for {\tt coqtop}.
+
+\item[{\tt -verbose}]\ 
+
+  This option is only for {\tt coqc}. It tells {\tt coqc} to compile the
+  file with a copy of its contents on standard output.
+
+\item[{\tt -batch}]\ 
+
+  Batch mode: exit just after arguments parsing. This option is only
+  used by {\tt coqc}.
+
+%Mostly unused in the code
+%\item[{\tt -debug}]\ 
+%
+% Switch on the debug flag.
+
+\item[{\tt -xml}]\ 
+
+  This option is for use with {\tt coqc}. It tells \Coq\ to export on
+  the standard output the content of the compiled file into XML format.
+
+\item[{\tt -quality}]
+
+  Improve the legibility of the proof terms produced by some tactics.
+
+\item[{\tt -emacs}]\ 
+
+  Tells \Coq\ it is executed under Emacs.
+
+\item[{\tt -impredicative-set}]\ 
+
+  Change the logical theory of {\Coq} by declaring the sort {\tt Set}
+  impredicative; warning: this is known to be inconsistent with
+  some standard axioms of classical mathematics such as the functional
+  axiom of choice or the principle of description.
+
+\item[{\tt -dump-glob} {\em file}]\ 
+
+  This dumps references for global names in file {\em file}
+  (to be used by coqdoc, see~\ref{coqdoc}).
+
+\item[{\tt -dont-load-proofs}]\ 
+
+  This avoids loading in memory the proofs of opaque theorems,
+  resulting in a smaller memory requirement and faster compilation;
+  warning: this invalidates some features such as the extraction tool.
+
+\item[{\tt -image} {\em file}]\ 
+
+  This option sets the binary image to be used to be {\em file}
+  instead of the standard one. Not of general use.
+
+\item[{\tt -bindir} {\em directory}]\ 
+
+  Set for {\tt coqc} the directory containing \Coq\ binaries.
+  It is equivalent to doing \texttt{export COQBIN=}{\em directory}
+  before launching {\tt coqc}.
+
+\item[{\tt -where}]\ 
+
+  Print \Coq's standard library location and exit.
+
+\item[{\tt -v}]\ 
+
+  Print \Coq's version and exit.
+
+\item[{\tt -h}, {\tt --help}]\ 
+
+  Print a short usage and exit.
+
+\end{description}
+
+% {\tt coqtop} has an additional option:
+
+% \begin{description}
+% \item[{\tt -searchisos}]\ \\
+%   Launch the {\sf Coq\_SearchIsos} toplevel
+%   (see section~\ref{coqsearchisos}, page~\pageref{coqsearchisos}).
+% \end{description} + +% $Id: RefMan-com.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-ext.tex b/doc/refman/RefMan-ext.tex new file mode 100644 index 00000000..503d7571 --- /dev/null +++ b/doc/refman/RefMan-ext.tex @@ -0,0 +1,1173 @@ +\chapter{Extensions of \Gallina{}} +\label{Gallina-extension}\index{Gallina} + +{\gallina} is the kernel language of {\Coq}. We describe here extensions of +the Gallina's syntax. + +\section{Record types +\comindex{Record} +\label{Record}} + +The \verb+Record+ construction is a macro allowing the definition of +records as is done in many programming languages. Its syntax is +described on figure \ref{record-syntax}. In fact, the \verb+Record+ +macro is more general than the usual record types, since it allows +also for ``manifest'' expressions. In this sense, the \verb+Record+ +construction allows to define ``signatures''. + +\begin{figure}[h] +\begin{centerframe} +\begin{tabular}{lcl} +{\sentence} & ++= & {\record}\\ + & & \\ +{\record} & ::= & + {\tt Record} {\ident} \sequence{\binderlet}{} {\tt :} {\sort} \verb.:=. \\ +&& ~~~~\zeroone{\ident} + \verb!{! \zeroone{\nelist{\field}{;}} \verb!}! \verb:.:\\ + & & \\ +{\field} & ::= & {\name} : {\type} \\ + & $|$ & {\name} {\typecstr} := {\term} +\end{tabular} +\end{centerframe} +\caption{Syntax for the definition of {\tt Record}} +\label{record-syntax} +\end{figure} + +\noindent In the expression + +\smallskip +{\tt Record} {\ident} {\params} \texttt{:} + {\sort} := {\ident$_0$} \verb+{+ + {\ident$_1$} \texttt{:} {\term$_1$}; + \dots + {\ident$_n$} \texttt{:} {\term$_n$} \verb+}+. +\smallskip + +\noindent the identifier {\ident} is the name of the defined record +and {\sort} is its type. The identifier {\ident$_0$} is the name of +its constructor. 
+If {\ident$_0$} is omitted, the default name {\tt
+Build\_{\ident}} is used. The identifiers {\ident$_1$}, ..,
+{\ident$_n$} are the names of fields and {\term$_1$}, .., {\term$_n$}
+their respective types. Remark that the type of {\ident$_i$} may
+depend on the previous {\ident$_j$} (for $j<i$). Thus the order of the
+fields is important. Finally, {\params} are the parameters of the
+record.
+
+\Example
+The set of rational numbers may be defined as:
+\begin{coq_example}
+Record Rat : Set := mkRat
+   {sign : bool;
+    top : nat;
+    bottom : nat;
+    Rat_bottom_cond : 0 <> bottom;
+    Rat_irred_cond :
+     forall x y z:nat, (x * y) = top /\ (x * z) = bottom -> x = 1}.
+\end{coq_example}
+
+Remark here that the field
+\verb+Rat_bottom_cond+ depends on the field \verb+bottom+.
+
+%Let us now see the work done by the {\tt Record} macro.
+%First the macro generates an inductive definition
+%with just one constructor:
+%
+%\medskip
+%\noindent
+%{\tt Inductive {\ident} {\binderlet} : {\sort} := \\
+%\mbox{}\hspace{0.4cm} {\ident$_0$} : forall ({\ident$_1$}:{\term$_1$}) ..
+%({\ident$_n$}:{\term$_n$}), {\ident} {\rm\sl params}.}
+%\medskip

Let us now see the work done by the {\tt Record} macro. First the
macro generates an inductive definition with just one constructor:
\begin{quote}
{\tt Inductive {\ident} {\params} :{\sort} :=} \\
\qquad {\tt
  {\ident$_0$} ({\ident$_1$}:{\term$_1$}) .. ({\ident$_n$}:{\term$_n$}).}
\end{quote}
To build an object of type {\ident}, one should provide the
constructor {\ident$_0$} with $n$ terms filling the fields of
the record.

As an example, let us define the rational $1/2$:
\begin{coq_example*}
Require Import Arith.
Theorem one_two_irred :
 forall x y z:nat, x * y = 1 /\ x * z = 2 -> x = 1.
\end{coq_example*}
\begin{coq_eval}
Lemma mult_m_n_eq_m_1 : forall m n:nat, m * n = 1 -> m = 1.
destruct m; trivial.
intros; apply f_equal with (f := S).
destruct m; trivial.
destruct n; simpl in H.
  rewrite <- mult_n_O in H.
  discriminate.
  rewrite <- plus_n_Sm in H.
  discriminate.
Qed.

intros x y z [H1 H2].
  apply mult_m_n_eq_m_1 with (n := y); trivial.
\end{coq_eval}
\ldots
\begin{coq_example*}
Qed.
\end{coq_example*}
\begin{coq_example}
Definition half := mkRat true 1 2 (O_S 1) one_two_irred.
+\end{coq_example} +\begin{coq_example} +Check half. +\end{coq_example} + +The macro generates also, when it is possible, the projection +functions for destructuring an object of type {\ident}. These +projection functions have the same name that the corresponding +fields. If a field is named ``\verb=_='' then no projection is built +for it. In our example: + +\begin{coq_example} +Eval compute in half.(top). +Eval compute in half.(bottom). +Eval compute in half.(Rat_bottom_cond). +\end{coq_example} +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{Warnings} +\item {\tt Warning: {\ident$_i$} cannot be defined.} + + It can happen that the definition of a projection is impossible. + This message is followed by an explanation of this impossibility. + There may be three reasons: + \begin{enumerate} + \item The name {\ident$_i$} already exists in the environment (see + Section~\ref{Axiom}). + \item The body of {\ident$_i$} uses an incorrect elimination for + {\ident} (see Sections~\ref{Fixpoint} and~\ref{Caseexpr}). + \item The type of the projections {\ident$_i$} depends on previous + projections which themselves couldn't be defined. + \end{enumerate} +\end{Warnings} + +\begin{ErrMsgs} + +\item \errindex{A record cannot be recursive} + + The record name {\ident} appears in the type of its fields. + +\item During the definition of the one-constructor inductive + definition, all the errors of inductive definitions, as described in + Section~\ref{gal_Inductive_Definitions}, may also occur. + +\end{ErrMsgs} + +\SeeAlso Coercions and records in Section~\ref{Coercions-and-records} +of the chapter devoted to coercions. + +\Rem {\tt Structure} is a synonym of the keyword {\tt Record}. + +\Rem An experimental syntax for projections based on a dot notation is +available. 
The command to activate it is +\begin{quote} +{\tt Set Printing Projections.} +\end{quote} + +\begin{figure}[t] +\begin{centerframe} +\begin{tabular}{lcl} +{\term} & ++= & {\term} {\tt .(} {\qualid} {\tt )}\\ + & $|$ & {\term} {\tt .(} {\qualid} \nelist{\termarg}{} {\tt )}\\ + & $|$ & {\term} {\tt .(} {@}{\qualid} \nelist{\term}{} {\tt )} +\end{tabular} +\end{centerframe} +\caption{Syntax of \texttt{Record} projections} +\label{fig:projsyntax} +\end{figure} + +The corresponding grammar rules are given Figure~\ref{fig:projsyntax}. +When {\qualid} denotes a projection, the syntax {\tt + {\term}.({\qualid})} is equivalent to {\qualid~\term}, the syntax +{\tt {\term}.({\qualid}~{\termarg}$_1$~ \ldots~ {\termarg}$_n$)} to +{\qualid~{\termarg}$_1$ \ldots {\termarg}$_n$~\term}, and the syntax +{\tt {\term}.(@{\qualid}~{\term}$_1$~\ldots~{\term}$_n$)} to +{@\qualid~{\term}$_1$ \ldots {\term}$_n$~\term}. In each case, {\term} +is the object projected and the other arguments are the parameters of +the inductive type. + +To deactivate the printing of projections, use +{\tt Unset Printing Projections}. + + +\section{Variants and extensions of {\tt match} +\label{Extensions-of-match} +\index{match@{\tt match\ldots with\ldots end}}} + +\subsection{Multiple and nested pattern-matching +\index{ML-like patterns} +\label{Mult-match}} + +The basic version of \verb+match+ allows pattern-matching on simple +patterns. As an extension, multiple and nested patterns are +allowed, as in ML-like languages. + +The extension just acts as a macro that is expanded during parsing +into a sequence of {\tt match} on simple patterns. Especially, a +construction defined using the extended {\tt match} is printed under +its expanded form. + +\SeeAlso chapter \ref{Mult-match-full}. + +\subsection{Pattern-matching on boolean values: the {\tt if} expression +\index{if@{\tt if ... then ... 
else}}} + +For inductive types with exactly two constructors and for +pattern-matchings expressions which do not depend on the arguments of +the constructors, it is possible to use a {\tt if ... then ... else} +notation. For instance, the definition + +\begin{coq_example} +Definition not (b:bool) := + match b with + | true => false + | false => true + end. +\end{coq_example} + +can be alternatively written + +\begin{coq_eval} +Reset not. +\end{coq_eval} +\begin{coq_example} +Definition not (b:bool) := if b then false else true. +\end{coq_example} + +More generally, for an inductive type with constructors {\tt C$_1$} +and {\tt C$_2$}, we have the following equivalence + +\smallskip + +{\tt if {\term} \zeroone{\ifitem} then {\term}$_1$ else {\term}$_2$} $\equiv$ +\begin{tabular}[c]{l} +{\tt match {\term} \zeroone{\ifitem} with}\\ +{\tt \verb!|! C$_1$ \_ {\ldots} \_ \verb!=>! {\term}$_1$} \\ +{\tt \verb!|! C$_2$ \_ {\ldots} \_ \verb!=>! {\term}$_2$} \\ +{\tt end} +\end{tabular} + +Here is an example. + +\begin{coq_example} +Check (fun x (H:{x=0}+{x<>0}) => + match H with + | left _ => true + | right _ => false + end). +\end{coq_example} + +Notice that the printing uses the {\tt if} syntax because {\tt sumbool} is +declared as such (see section \ref{printing-options}). + +\subsection{Irrefutable patterns: the destructuring {\tt let} +\index{let in@{\tt let ... in}} +\label{Letin}} + + + +Closed terms (that is not relying on any axiom or variable) in an +inductive type having only one constructor, say {\tt foo}, have +necessarily the form \texttt{(foo ...)}. In this case, the {\tt match} +construction can be written with a syntax close to the {\tt let ... in +...} construction. Expression {\tt let +(}~{\ident$_1$},\ldots,{\ident$_n$}~{\tt ) :=}~{\term$_0$}~{\tt +in}~{\term$_1$} performs case analysis on {\term$_0$} which must be in +an inductive type with one constructor with $n$ arguments. 
Variables +{\ident$_1$}\ldots{\ident$_n$} are bound to the $n$ arguments of the +constructor in expression {\term$_1$}. For instance, the definition + +\begin{coq_example} +Definition fst (A B:Set) (H:A * B) := match H with + | pair x y => x + end. +\end{coq_example} + +can be alternatively written + +\begin{coq_eval} +Reset fst. +\end{coq_eval} +\begin{coq_example} +Definition fst (A B:Set) (p:A * B) := let (x, _) := p in x. +\end{coq_example} +Note however that reduction is slightly different from regular {\tt +let ... in ...} construction since it can occur only if {\term$_0$} +can be put in constructor form. Otherwise, reduction is blocked. + +The pretty-printing of a definition by matching on a +irrefutable pattern can either be done using {\tt match} or the {\tt +let} construction (see Section~\ref{printing-options}). + +The general equivalence for an inductive type with one constructors {\tt C} is + +\smallskip +{\tt let ({\ident}$_1$,\ldots,{\ident}$_n$) \zeroone{\ifitem} := {\term} in {\term}'} \\ +$\equiv$~ +{\tt match {\term} \zeroone{\ifitem} with C {\ident}$_1$ {\ldots} {\ident}$_n$ \verb!=>! {\term}' end} + +\subsection{Options for pretty-printing of {\tt match} +\label{printing-options}} + +There are three options controlling the pretty-printing of {\tt match} +expressions. + +\subsubsection{Printing of wildcard pattern +\comindex{Set Printing Wildcard} +\comindex{Unset Printing Wildcard} +\comindex{Test Printing Wildcard}} + +Some variables in a pattern may not occur in the right-hand side of +the pattern-matching clause. There are options to control the +display of these variables. + +\begin{quote} +{\tt Set Printing Wildcard.} +\end{quote} +The variables having no occurrences in the right-hand side of the +pattern-matching clause are just printed using the wildcard symbol +``{\tt \_}''. + +\begin{quote} +{\tt Unset Printing Wildcard.} +\end{quote} +The variables, even useless, are printed using their usual name. 
But some +non dependent variables have no name. These ones are still printed +using a ``{\tt \_}''. + +\begin{quote} +{\tt Test Printing Wildcard.} +\end{quote} +This tells if the wildcard printing mode is on or off. The default is +to print wildcard for useless variables. + +\subsubsection{Printing of the elimination predicate +\comindex{Set Printing Synth} +\comindex{Unset Printing Synth} +\comindex{Test Printing Synth}} + +In most of the cases, the type of the result of a matched term is +mechanically synthesisable. Especially, if the result type does not +depend of the matched term. + +\begin{quote} +{\tt Set Printing Synth.} +\end{quote} +The result type is not printed when {\Coq} knows that it can +re-synthesise it. + +\begin{quote} +{\tt Unset Printing Synth.} +\end{quote} +This forces the result type to be always printed. + +\begin{quote} +{\tt Test Printing Synth.} +\end{quote} +This tells if the non-printing of synthesisable types is on or off. +The default is to not print synthesisable types. + +\subsubsection{Printing matching on irrefutable pattern +\comindex{Add Printing Let {\ident}} +\comindex{Remove Printing Let {\ident}} +\comindex{Test Printing Let {\ident}} +\comindex{Print Table Printing Let}} + +If an inductive type has just one constructor, +pattern-matching can be written using {\tt let} ... {\tt :=} +... {\tt in}~... + +\begin{quote} +{\tt Add Printing Let {\ident}.} +\end{quote} +This adds {\ident} to the list of inductive types for which +pattern-matching is written using a {\tt let} expression. + +\begin{quote} +{\tt Remove Printing Let {\ident}.} +\end{quote} +This removes {\ident} from this list. + +\begin{quote} +{\tt Test Printing Let {\ident}.} +\end{quote} +This tells if {\ident} belongs to the list. + +\begin{quote} +{\tt Print Table Printing Let.} +\end{quote} +This prints the list of inductive types for which pattern-matching is +written using a {\tt let} expression. 
+ +The list of inductive types for which pattern-matching is written +using a {\tt let} expression is managed synchronously. This means that +it is sensible to the command {\tt Reset}. + +\subsubsection{Printing matching on booleans +\comindex{Add Printing If {\ident}} +\comindex{Remove Printing If {\ident}} +\comindex{Test Printing If {\ident}} +\comindex{Print Table Printing If}} + +If an inductive type is isomorphic to the boolean type, +pattern-matching can be written using {\tt if} ... {\tt then} ... {\tt + else} ... + +\begin{quote} +{\tt Add Printing If {\ident}.} +\end{quote} +This adds {\ident} to the list of inductive types for which +pattern-matching is written using an {\tt if} expression. + +\begin{quote} +{\tt Remove Printing If {\ident}.} +\end{quote} +This removes {\ident} from this list. + +\begin{quote} +{\tt Test Printing If {\ident}.} +\end{quote} +This tells if {\ident} belongs to the list. + +\begin{quote} +{\tt Print Table Printing If.} +\end{quote} +This prints the list of inductive types for which pattern-matching is +written using an {\tt if} expression. + +The list of inductive types for which pattern-matching is written +using an {\tt if} expression is managed synchronously. This means that +it is sensible to the command {\tt Reset}. + +\subsubsection{Example} + +This example emphasizes what the printing options offer. + +\begin{coq_example} +Test Printing Let prod. +Print fst. +Remove Printing Let prod. +Unset Printing Synth. +Unset Printing Wildcard. +Print fst. +\end{coq_example} + +% \subsection{Still not dead old notations} + +% The following variant of {\tt match} is inherited from older version +% of {\Coq}. 
+ +% \medskip +% \begin{tabular}{lcl} +% {\term} & ::= & {\annotation} {\tt Match} {\term} {\tt with} {\terms} {\tt end}\\ +% \end{tabular} +% \medskip + +% This syntax is a macro generating a combination of {\tt match} with {\tt +% Fix} implementing a combinator for primitive recursion equivalent to +% the {\tt Match} construction of \Coq\ V5.8. It is provided only for +% sake of compatibility with \Coq\ V5.8. It is recommended to avoid it. +% (see section~\ref{Matchexpr}). + +% There is also a notation \texttt{Case} that is the +% ancestor of \texttt{match}. Again, it is still in the code for +% compatibility with old versions but the user should not use it. + +% Explained in RefMan-gal.tex +%% \section{Forced type} + +%% In some cases, one may wish to assign a particular type to a term. The +%% syntax to force the type of a term is the following: + +%% \medskip +%% \begin{tabular}{lcl} +%% {\term} & ++= & {\term} {\tt :} {\term}\\ +%% \end{tabular} +%% \medskip + +%% It forces the first term to be of type the second term. The +%% type must be compatible with +%% the term. More precisely it must be either a type convertible to +%% the automatically inferred type (see chapter \ref{Cic}) or a type +%% coercible to it, (see \ref{Coercions}). When the type of a +%% whole expression is forced, it is usually not necessary to give the types of +%% the variables involved in the term. + +%% Example: + +%% \begin{coq_example} +%% Definition ID := forall X:Set, X -> X. +%% Definition id := (fun X x => x):ID. +%% Check id. +%% \end{coq_example} + +\section{Section mechanism +\index{Sections} +\label{Section}} + +The sectioning mechanism allows to organise a proof in structured +sections. Then local declarations become available (see +Section~\ref{Simpl-definitions}). + +\subsection{\tt Section {\ident}\comindex{Section}} + +This command is used to open a section named {\ident}. + +%% Discontinued ? 
+%% \begin{Variants} +%% \comindex{Chapter} +%% \item{\tt Chapter {\ident}}\\ +%% Same as {\tt Section {\ident}} +%% \end{Variants} + +\subsection{\tt End {\ident} +\comindex{End}} + +This command closes the section named {\ident}. When a section is +closed, all local declarations (variables and local definitions) are +{\em discharged}. This means that all global objects defined in the +section are generalised with respect to all variables and local +definitions it depends on in the section. None of the local +declarations (considered as autonomous declarations) survive the end +of the section. + +Here is an example : +\begin{coq_example} +Section s1. +Variables x y : nat. +Let y' := y. +Definition x' := S x. +Definition x'' := x' + y'. +Print x'. +End s1. +Print x'. +Print x''. +\end{coq_example} +Notice the difference between the value of {\tt x'} and {\tt x''} +inside section {\tt s1} and outside. + +\begin{ErrMsgs} +\item \errindex{This is not the last opened section} +\end{ErrMsgs} + +\begin{Remarks} +\item Most commands, like {\tt Hint}, {\tt Notation}, option management, ... +which appear inside a section are cancelled when the +section is closed. +% cf section \ref{LongNames} +%\item Usually all identifiers must be distinct. +%However, a name already used in a closed section (see \ref{Section}) +%can be reused. In this case, the old name is no longer accessible. + +% Obsolète +%\item A module implicitly open a section. Be careful not to name a +%module with an identifier already used in the module (see \ref{compiled}). +\end{Remarks} + +\input{RefMan-mod.v} + +\section{Libraries and qualified names} + +\subsection{Names of libraries and files +\label{Libraries} +\index{Libraries} +\index{Logical paths}} + +\paragraph{Libraries} + +The theories developed in {\Coq} are stored in {\em libraries}. A +library is characterised by a name called {\it root} of the +library. 
The standard library of {\Coq} has root name {\tt Coq} and is +known by default when a {\Coq} session starts. + +Libraries have a tree structure. E.g., the {\tt Coq} library +contains the sub-libraries {\tt Init}, {\tt Logic}, {\tt Arith}, {\tt +Lists}, ... The ``dot notation'' is used to separate the different +component of a library name. For instance, the {\tt Arith} library of +{\Coq} standard library is written ``{\tt Coq.Arith}''. + +\medskip +\Rem no blank is allowed between the dot and the identifier on its +right, otherwise the dot is interpreted as the full stop (period) of +the command! +\medskip + +\paragraph{Physical paths vs logical paths} + +Libraries and sub-libraries are denoted by {\em logical directory +paths} (written {\dirpath} and of which the syntax is the same as +{\qualid}, see \ref{qualid}). Logical directory +paths can be mapped to physical directories of the +operating system using the command (see \ref{AddLoadPath}) +\begin{quote} +{\tt Add LoadPath {\it physical\_path} as {\dirpath}}. +\end{quote} +A library can inherit the tree structure of a physical directory by +using the {\tt -R} option to {\tt coqtop} or the +command (see \ref{AddRecLoadPath}) +\begin{quote} +{\tt Add Rec LoadPath {\it physical\_path} as {\dirpath}}. +\end{quote} + +\Rem When used interactively with {\tt coqtop} command, {\Coq} opens a +library called {\tt Top}. + +\paragraph{The file level} + +At some point, (sub-)libraries contain {\it modules} which coincide +with files at the physical level. As for sublibraries, the dot +notation is used to denote a specific module of a library. Typically, +{\tt Coq.Init.Logic} is the logical path associated to the file {\tt + Logic.v} of {\Coq} standard library. Notice that compilation (see +\ref{Addoc-coqc}) is done at the level of files. 
+ +If the physical directory where a file {\tt File.v} lies is mapped to +the empty logical directory path (which is the default when using the +simple form of {\tt Add LoadPath} or {\tt -I} option to coqtop), then +the name of the module it defines is {\tt File}. + +\subsection{Qualified names +\label{LongNames} +\index{Qualified identifiers} +\index{Absolute names}} + +Modules contain constructions (sub-modules, axioms, parameters, +definitions, lemmas, theorems, remarks or facts). The (full) name of a +construction starts with the logical name of the module in which it is defined +followed by the (short) name of the construction. +Typically, the full name {\tt Coq.Init.Logic.eq} denotes Leibniz' equality +defined in the module {\tt Logic} in the sublibrary {\tt Init} of the +standard library of \Coq. + +\paragraph{Absolute, partially qualified and short names} + +The full name of a library, module, section, definition, theorem, +... is its {\it absolute name}. The last identifier ({\tt eq} in the +previous example) is its {\it short name} (or sometimes {\it base +name}). Any suffix of the absolute name is a {\em partially qualified +name} (e.g. {\tt Logic.eq} is a partially qualified name for {\tt +Coq.Init.Logic.eq}). Partially qualified names (shortly {\em +qualified name}) are also built from identifiers separated by dots. +They are written {\qualid} in the documentation. + +{\Coq} does not accept two constructions (definition, theorem, ...) +with the same absolute name but different constructions can have the +same short name (or even same partially qualified names as soon as the +full names are different). + +\paragraph{Visibility} + +{\Coq} maintains a {\it name table} mapping qualified names to absolute +names. This table is modified by the commands {\tt Require} (see +\ref{Require}), {\tt Import} and {\tt Export} (see \ref{Import}) and +also each time a new declaration is added to the context. 
+
+An absolute name is called {\it visible} from a given short or
+partially qualified name when this name suffices to denote it. This
+means that the short or partially qualified name is mapped to the absolute
+name in {\Coq} name table.
+
+It may happen that a visible name is hidden by the short name or a
+qualified name of another construction. In this case, the name that
+has been hidden must be referred to using one more level of
+qualification. Still, to ensure that a construction always remains
+accessible, absolute names can never be hidden.
+
+Examples:
+\begin{coq_eval}
+Reset Initial.
+\end{coq_eval}
+\begin{coq_example}
+Check 0.
+Definition nat := bool.
+Check 0.
+Check Datatypes.nat.
+Locate nat.
+\end{coq_example}
+
+\Rem There is also a name table for sublibraries, modules and sections.
+
+\Rem In versions prior to {\Coq} 7.4, lemmas declared with {\tt
+Remark} and {\tt Fact} kept in their full name the names of the
+sections in which they were defined. Since {\Coq} 7.4, they strictly
+behave as {\tt Theorem} and {\tt Lemma} do.
+
+\SeeAlso Command {\tt Locate} in Section~\ref{Locate}.
+
+%% \paragraph{The special case of remarks and facts}
+%%
+%% In contrast with definitions, lemmas, theorems, axioms and parameters,
+%% the absolute name of remarks includes the segment of sections in which
+%% it is defined. Concretely, if a remark {\tt R} is defined in
+%% subsection {\tt S2} of section {\tt S1} in module {\tt M}, then its
+%% absolute name is {\tt M.S1.S2.R}. The same for facts, except that the
+%% name of the innermost section is dropped from the full name. Then, if
+%% a fact {\tt F} is defined in subsection {\tt S2} of section {\tt S1}
+%% in module {\tt M}, then its absolute name is {\tt M.S1.F}.
+
+
+\paragraph{Requiring a file}
+
+A module compiled in a ``.vo'' file comes with a logical name (e.g.
+physical file \verb!theories/Init/Datatypes.vo! 
in the {\Coq} installation directory is bound to the logical module {\tt Coq.Init.Datatypes}). +When requiring the file, the mapping between physical directories and logical library should be consistent with the mapping used to compile the file (for modules of the standard library, this is automatic -- check it by typing {\tt Print LoadPath}). + +The command {\tt Add Rec LoadPath} is also available from {\tt coqtop} +and {\tt coqc} by using option~\verb=-R=. + +\section{Implicit arguments +\index{Implicit arguments} +\label{Implicit Arguments}} + +An implicit argument of a function is an argument which can be +inferred from the knowledge of the type of other arguments of the +function, or of the type of the surrounding context of the application. +Especially, an implicit argument corresponds to a parameter +dependent in the type of the function. Typical implicit +arguments are the type arguments in polymorphic functions. +More precisely, there are several kinds of implicit arguments. + +\paragraph{Strict Implicit Arguments.} +An implicit argument can be either strict or non strict. An implicit +argument is said {\em strict} if, whatever the other arguments of the +function are, it is still inferable from the type of some other +argument. Technically, an implicit argument is strict if it +corresponds to a parameter which is not applied to a variable which +itself is another parameter of the function (since this parameter +may erase its arguments), not in the body of a {\tt match}, and not +itself applied or matched against patterns (since the original +form of the argument can be lost by reduction). + +For instance, the first argument of +\begin{quote} +\verb|cons: forall A:Set, A -> list A -> list A| +\end{quote} +in module {\tt List.v} is strict because {\tt list} is an inductive +type and {\tt A} will always be inferable from the type {\tt +list A} of the third argument of {\tt cons}. 
+
+By contrast, the second argument of a term of type
+\begin{quote}
+\verb|forall P:nat->Prop, forall n:nat, P n -> ex nat P|
+\end{quote}
+is implicit but not strict, since it can only be inferred from the
+type {\tt P n} of the third argument and if {\tt P} is e.g. {\tt
+fun \_ => True}, it reduces to an expression where {\tt n} does not
+occur any longer. The first argument {\tt P} is implicit but not
+strict either because it can only be inferred from {\tt P n} and {\tt
+P} is not canonically inferable from an arbitrary {\tt n} and the
+normal form of {\tt P n} (consider e.g. that {\tt n} is {\tt 0} and
+the third argument has type {\tt True}, then any {\tt P} of the form
+{\tt fun n => match n with 0 => True | \_ => \mbox{\em anything} end} would
+be a solution of the inference problem).
+
+\paragraph{Contextual Implicit Arguments.}
+An implicit argument can be {\em contextual} or not. An implicit
+argument is said {\em contextual} if it can be inferred only from the
+knowledge of the type of the context of the current expression. For
+instance, the only argument of
+\begin{quote}
+\verb|nil : forall A:Set, list A|
+\end{quote}
+is contextual. Similarly, both arguments of a term of type
+\begin{quote}
+\verb|forall P:nat->Prop, forall n:nat, P n \/ n = 0|
+\end{quote}
+are contextual (moreover, {\tt n} is strict and {\tt P} is not).
+
+\subsection{Casual use of implicit arguments}
+
+In a given expression, if it is clear that some argument of a function
+can be inferred from the type of the other arguments, the user can
+force the given argument to be guessed by replacing it by ``{\tt \_}''. If
+possible, the correct argument will be automatically generated.
+
+\begin{ErrMsgs}
+
+\item \errindex{Cannot infer a term for this placeholder}
+
+ {\Coq} was not able to deduce an instantiation of a ``{\tt \_}''. 
+ +\end{ErrMsgs} + +\subsection{Declaration of implicit arguments for a constant +\comindex{Implicit Arguments}} + +In case one wants that some arguments of a given object (constant, +inductive types, constructors, assumptions, local or not) are always +inferred by Coq, one may declare once for all which are the expected +implicit arguments of this object. The syntax is +\begin{quote} +\tt Implicit Arguments {\qualid} [ \nelist{\ident}{} ] +\end{quote} +where the list of {\ident} is the list of parameters to be declared +implicit. After this, implicit arguments can just (and have to) be +skipped in any expression involving an application of {\qualid}. + +\Example +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{coq_example*} +Inductive list (A:Set) : Set := + | nil : list A + | cons : A -> list A -> list A. +\end{coq_example*} +\begin{coq_example} +Check (cons nat 3 (nil nat)). +Implicit Arguments cons [A]. +Implicit Arguments nil [A]. +Check (cons 3 nil). +\end{coq_example} + +\Rem To know which are the implicit arguments of an object, use command +{\tt Print Implicit} (see \ref{PrintImplicit}). + +\Rem If the list of arguments is empty, the command removes the +implicit arguments of {\qualid}. + +\subsection{Automatic declaration of implicit arguments for a constant} + +{\Coq} can also automatically detect what are the implicit arguments +of a defined object. The command is just +\begin{quote} +\tt Implicit Arguments {\qualid}. +\end{quote} +The auto-detection is governed by options telling if strict and +contextual implicit arguments must be considered or not (see +Sections~\ref{SetStrictImplicit} and~\ref{SetContextualImplicit}). + +\Example +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{coq_example*} +Inductive list (A:Set) : Set := + | nil : list A + | cons : A -> list A -> list A. +\end{coq_example*} +\begin{coq_example} +Implicit Arguments cons. +Print Implicit cons. +Implicit Arguments nil. +Print Implicit nil. +Set Contextual Implicit. 
+Implicit Arguments nil. +Print Implicit nil. +\end{coq_example} + +The computation of implicit arguments takes account of the +unfolding of constants. For instance, the variable {\tt p} below has +type {\tt (Transitivity R)} which is reducible to {\tt forall x,y:U, R x +y -> forall z:U, R y z -> R x z}. As the variables {\tt x}, {\tt y} and +{\tt z} appear strictly in body of the type, they are implicit. + +\begin{coq_example*} +Variable X : Type. +Definition Relation := X -> X -> Prop. +Definition Transitivity (R:Relation) := + forall x y:X, R x y -> forall z:X, R y z -> R x z. +Variables (R : Relation) (p : Transitivity R). +Implicit Arguments p. +\end{coq_example*} +\begin{coq_example} +Print p. +Print Implicit p. +\end{coq_example} +\begin{coq_example*} +Variables (a b c : X) (r1 : R a b) (r2 : R b c). +\end{coq_example*} +\begin{coq_example} +Check (p r1 r2). +\end{coq_example} + +\subsection{Mode for automatic declaration of implicit arguments +\label{Auto-implicit} +\comindex{Set Implicit Arguments} +\comindex{Unset Implicit Arguments}} + +In case one wants to systematically declare implicit the arguments +detectable as such, one may switch to the automatic declaration of +implicit arguments mode by using the command +\begin{quote} +\tt Set Implicit Arguments. +\end{quote} +Conversely, one may unset the mode by using {\tt Unset Implicit +Arguments}. The mode is off by default. Auto-detection of implicit +arguments is governed by options controlling whether strict and +contextual implicit arguments have to be considered or not. + +\subsection{Controlling strict implicit arguments +\comindex{Set Strict Implicit} +\comindex{Unset Strict Implicit} +\label{SetStrictImplicit}} + +By default, {\Coq} automatically set implicit only the strict implicit +arguments. To relax this constraint, use command +\begin{quote} +\tt Unset Strict Implicit. +\end{quote} +Conversely, use command {\tt Set Strict Implicit} to +restore the strict implicit mode. 
+
+\Rem In versions of {\Coq} prior to version 8.0, the default was to
+declare the strict implicit arguments as implicit.
+
+\subsection{Controlling contextual implicit arguments
+\comindex{Set Contextual Implicit}
+\comindex{Unset Contextual Implicit}
+\label{SetContextualImplicit}}
+
+By default, {\Coq} does not automatically set implicit the contextual
+implicit arguments. To tell {\Coq} to also infer contextual implicit
+arguments, use command
+\begin{quote}
+\tt Set Contextual Implicit.
+\end{quote}
+Conversely, use command {\tt Unset Contextual Implicit} to
+unset the contextual implicit mode.
+
+\subsection{Explicit Applications
+\index{Explicitation of implicit arguments}
+\label{Implicits-explicitation}
+\index{qualid@{\qualid}}}
+
+In the presence of non strict or contextual arguments, or in the presence of
+partial applications, the synthesis of implicit arguments may fail, so
+one may have to give explicitly certain implicit arguments of an
+application. The syntax for this is {\tt (\ident:=\term)} where {\ident}
+is the name of the implicit argument and {\term} is its corresponding
+explicit term. Alternatively, one can locally deactivate the hiding of
+implicit arguments of a function by using the notation
+{\tt @{\qualid}~{\term}$_1$..{\term}$_n$}. This syntax extension is
+given in Figure~\ref{fig:explicitations}.
+\begin{figure}
+\begin{centerframe}
+\begin{tabular}{lcl}
+{\term} & ++= & @ {\qualid} \nelist{\term}{}\\
+& $|$ & @ {\qualid}\\
+& $|$ & {\qualid} \nelist{\textrm{\textsl{argument}}}{}\\
+\\
+{\textrm{\textsl{argument}}} & ::= & {\term} \\
+& $|$ & {\tt ({\ident}:={\term})}\\
+\end{tabular}
+\end{centerframe}
+\caption{Syntax for explicitations of implicit arguments}
+\label{fig:explicitations}
+\end{figure}
+
+\noindent {\bf Example (continued): }
+\begin{coq_example}
+Check (p r1 (z:=c)).
+Check (p (x:=a) (y:=b) r1 (z:=c) r2). 
+
+\end{coq_example}
+
+\subsection{Displaying what the implicit arguments are
+\comindex{Print Implicit}
+\label{PrintImplicit}}
+
+To display the implicit arguments associated with an object, use command
+\begin{quote}
+\tt Print Implicit {\qualid}.
+\end{quote}
+
+\subsection{Explicitation of implicit arguments for pretty-printing
+\comindex{Set Printing Implicit}
+\comindex{Unset Printing Implicit}}
+
+By default the basic pretty-printing rules hide the inferable implicit
+arguments of an application. To force printing all implicit arguments,
+use command
+\begin{quote}
+{\tt Set Printing Implicit.}
+\end{quote}
+Conversely, to restore the hiding of implicit arguments, use command
+\begin{quote}
+{\tt Unset Printing Implicit.}
+\end{quote}
+
+\SeeAlso {\tt Set Printing All} in section \ref{SetPrintingAll}.
+
+\subsection{Interaction with subtyping}
+
+When an implicit argument can be inferred from the type of more than
+one of the other arguments, then only the type of the first of these
+arguments is taken into account, and not an upper type of all of
+them. As a consequence, the inference of the implicit argument of
+``='' fails in
+
+\begin{coq_example*}
+Check nat = Prop.
+\end{coq_example*}
+
+but succeeds in
+
+\begin{coq_example*}
+Check Prop = nat.
+\end{coq_example*}
+
+\subsection{Canonical structures
+\comindex{Canonical Structure}}
+
+A canonical structure is an instance of a record/structure type that
+can be used to solve equations involving implicit arguments. Assume
+that {\qualid} denotes an object $(Build\_struc~ c_1~ \ldots~ c_n)$ in the
+structure {\em struct} of which the fields are $x_1$, ...,
+$x_n$. Assume that {\qualid} is declared as a canonical structure
+using the command
+\begin{quote}
+{\tt Canonical Structure {\qualid}.}
+\end{quote}
+Then, each time an equation of the form $(x_i~
+\_)=_{\beta\delta\iota\zeta}c_i$ has to be solved during the
+type-checking process, {\qualid} is used as a solution. 
+ Otherwise
+said, {\qualid} is canonically used to extend the field $c_i$ into a
+complete structure built on $c_i$.
+
+Canonical structures are particularly useful when mixed with
+coercions and strict implicit arguments. Here is an example.
+\begin{coq_example*}
+Require Import Relations.
+Require Import EqNat.
+Set Implicit Arguments.
+Unset Strict Implicit.
+Structure Setoid : Type :=
+ {Carrier :> Set;
+ Equal : relation Carrier;
+ Prf_equiv : equivalence Carrier Equal}.
+Definition is_law (A B:Setoid) (f:A -> B) :=
+ forall x y:A, Equal x y -> Equal (f x) (f y).
+Axiom eq_nat_equiv : equivalence nat eq_nat.
+Definition nat_setoid : Setoid := Build_Setoid eq_nat_equiv.
+Canonical Structure nat_setoid.
+\end{coq_example*}
+
+Thanks to \texttt{nat\_setoid} declared as canonical, the implicit
+arguments {\tt A} and {\tt B} can be synthesised in the next statement.
+\begin{coq_example}
+Lemma is_law_S : is_law S.
+\end{coq_example}
+
+\Rem If the same field occurs in several canonical structures, then
+only the structure declared first as canonical is considered.
+
+\begin{Variants}
+\item {\tt Canonical Structure {\ident} := {\term} : {\type}.}\\
+ {\tt Canonical Structure {\ident} := {\term}.}\\
+ {\tt Canonical Structure {\ident} : {\type} := {\term}.}
+
+These are equivalent to a regular definition of {\ident} followed by
+the declaration
+
+{\tt Canonical Structure {\ident}}.
+\end{Variants}
+
+\SeeAlso more examples in user contribution \texttt{category}
+(\texttt{Rocq/ALGEBRA}).
+
+\subsection{Implicit types of variables}
+
+It is possible to bind variable names to a given type (e.g. in a
+development using arithmetic, it may be convenient to bind the names
+{\tt n} or {\tt m} to the type {\tt nat} of natural numbers). 
The +command for that is +\begin{quote} +\tt Implicit Types \nelist{\ident}{} : {\type} +\end{quote} +The effect of the command is to automatically set the type of bound +variables starting with {\ident} (either {\ident} itself or +{\ident} followed by one or more single quotes, underscore or digits) +to be {\type} (unless the bound variable is already declared with an +explicit type in which case, this latter type is considered). + +\Example +\begin{coq_example} +Require Import List. +Implicit Types m n : nat. +Lemma cons_inj_nat : forall m n l, n :: l = m :: l -> n = m. +intros m n. +Lemma cons_inj_bool : forall (m n:bool) l, n :: l = m :: l -> n = m. +\end{coq_example} + +\begin{Variants} +\item {\tt Implicit Type {\ident} : {\type}}\\ +This is useful for declaring the implicit type of a single variable. +\end{Variants} + +\section{Coercions +\label{Coercions} +\index{Coercions}} + +Coercions can be used to implicitly inject terms from one {\em class} in +which they reside into another one. A {\em class} is either a sort +(denoted by the keyword {\tt Sortclass}), a product type (denoted by the +keyword {\tt Funclass}), or a type constructor (denoted by its name), +e.g. an inductive type or any constant with a type of the form +\texttt{forall} $(x_1:A_1) .. (x_n:A_n),~s$ where $s$ is a sort. + +Then the user is able to apply an +object that is not a function, but can be coerced to a function, and +more generally to consider that a term of type A is of type B provided +that there is a declared coercion between A and B. The main command is +\comindex{Coercion} +\begin{quote} +\tt Coercion {\qualid} : {\class$_1$} >-> {\class$_2$}. +\end{quote} +which declares the construction denoted by {\qualid} as a +coercion between {\class$_1$} and {\class$_2$}. + +More details and examples, and a description of the commands related +to coercions are provided in chapter \ref{Coercions-full}. 
+ +\section{Printing constructions in full} +\label{SetPrintingAll} +\comindex{Set Printing All} +\comindex{Unset Printing All} + +Coercions, implicit arguments, the type of pattern-matching, but also +notations (see chapter \ref{Addoc-syntax}) can obfuscate the behavior +of some tactics (typically the tactics applying to occurrences of +subterms are sensitive to the implicit arguments). The command +\begin{quote} +{\tt Set Printing All.} +\end{quote} +deactivates all high-level printing features such as coercions, +implicit arguments, returned type of pattern-matching, notations and +various syntactic sugar for pattern-matching or record projections. +Otherwise said, {\tt Set Printing All} includes the effects +of the commands {\tt Set Printing Implicit}, {\tt Set Printing +Coercions}, {\tt Set Printing Synth}, {\tt Unset Printing Projections} +and {\tt Unset Printing Notations}. To reactivate the high-level +printing features, use the command +\begin{quote} +{\tt Unset Printing All.} +\end{quote} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-gal.tex b/doc/refman/RefMan-gal.tex new file mode 100644 index 00000000..ce2b75b8 --- /dev/null +++ b/doc/refman/RefMan-gal.tex @@ -0,0 +1,1451 @@ +\chapter{The \gallina{} specification language +\label{Gallina}\index{Gallina}} + +This chapter describes \gallina, the specification language of {\Coq}. +It allows to develop mathematical theories and to prove specifications +of programs. The theories are built from axioms, hypotheses, +parameters, lemmas, theorems and definitions of constants, functions, +predicates and sets. The syntax of logical objects involved in +theories is described in section \ref{term}. The language of +commands, called {\em The Vernacular} is described in section +\ref{Vernacular}. + +In {\Coq}, logical objects are typed to ensure their logical +correctness. 
The rules implemented by the typing algorithm are described in +chapter \ref{Cic}. + +\subsection*{About the grammars in the manual +\label{BNF-syntax}\index{BNF metasyntax}} + +Grammars are presented in Backus-Naur form (BNF). Terminal symbols are +set in {\tt typewriter font}. In addition, there are special +notations for regular expressions. + +An expression enclosed in square brackets \zeroone{\ldots} means at +most one occurrence of this expression (this corresponds to an +optional component). + +The notation ``\nelist{\entry}{sep}'' stands for a non empty +sequence of expressions parsed by {\entry} and +separated by the literal ``{\tt sep}''\footnote{This is similar to the +expression ``{\entry} $\{$ {\tt sep} {\entry} $\}$'' in +standard BNF, or ``{\entry} $($ {\tt sep} {\entry} $)$*'' in +the syntax of regular expressions.}. + +Similarly, the notation ``\nelist{\entry}{}'' stands for a non +empty sequence of expressions parsed by the ``{\entry}'' entry, +without any separator between. + +At the end, the notation ``\sequence{\entry}{\tt sep}'' stands for a +possibly empty sequence of expressions parsed by the ``{\entry}'' entry, +separated by the literal ``{\tt sep}''. + +\section{Lexical conventions +\label{lexical}\index{Lexical conventions}} + +\paragraph{Blanks} +Space, newline and horizontal tabulation are considered as blanks. +Blanks are ignored but they separate tokens. + +\paragraph{Comments} + +Comments in {\Coq} are enclosed between {\tt (*} and {\tt + *)}\index{Comments}, and can be nested. They can contain any +character. However, string literals must be correctly closed. Comments +are treated as blanks. + +\paragraph{Identifiers and access identifiers} + +Identifiers, written {\ident}, are sequences of letters, digits, +\verb!_! and \verb!'!, that do not start with a digit or \verb!'!. 
+That is, they are recognized by the following lexical class: + +\index{ident@\ident} +\begin{center} +\begin{tabular}{rcl} +{\firstletter} & ::= & {\tt a..z} $\mid$ {\tt A..Z} $\mid$ {\tt \_} +% $\mid$ {\tt unicode-letter} +\\ +{\subsequentletter} & ::= & {\tt a..z} $\mid$ {\tt A..Z} $\mid$ {\tt 0..9} +$\mid$ {\tt \_} % $\mid$ {\tt \$} +$\mid$ {\tt '} \\ +{\ident} & ::= & {\firstletter} \sequencewithoutblank{\subsequentletter}{} +\end{tabular} +\end{center} +All characters are meaningful. In particular, identifiers are case-sensitive. +Access identifiers, written {\accessident}, are identifiers prefixed +by \verb!.! (dot) without blank. They are used in the syntax of qualified +identifiers. + +\paragraph{Natural numbers and integers} +Numerals are sequences of digits. Integers are numerals optionally preceded by a minus sign. + +\index{num@{\num}} +\index{integer@{\integer}} +\begin{center} +\begin{tabular}{r@{\quad::=\quad}l} +{\digit} & {\tt 0..9} \\ +{\num} & \nelistwithoutblank{\digit}{} \\ +{\integer} & \zeroone{\tt -}{\num} \\ +\end{tabular} +\end{center} + +\paragraph{Strings} +\label{strings} +\index{string@{\qstring}} +Strings are delimited by \verb!"! (double quote), and enclose a +sequence of any characters different from \verb!"! or the sequence +\verb!""! to denote the double quote character. In grammars, the +entry for quoted strings is {\qstring}. + +\paragraph{Keywords} +The following identifiers are reserved keywords, and cannot be +employed otherwise: +\begin{center} +\begin{tabular}{llllll} +\verb!_! & +\verb!as! & +\verb!at! & +\verb!cofix! & +\verb!else! & +\verb!end! \\ +% +\verb!exists! & +\verb!exists2! & +\verb!fix! & +\verb!for! & +\verb!forall! & +\verb!fun! \\ +% +\verb!if! & +\verb!IF! & +\verb!in! & +\verb!let! & +\verb!match! & +\verb!mod! \\ +% +\verb!Prop! & +\verb!return! & +\verb!Set! & +\verb!then! & +\verb!Type! & +\verb!using! \\ +% +\verb!where! & +\verb!with! 
& +\end{tabular} +\end{center} + + +\paragraph{Special tokens} +The following sequences of characters are special tokens: +\begin{center} +\begin{tabular}{lllllll} +\verb/!/ & +\verb!%! & +\verb!&! & +\verb!&&! & +\verb!(! & +\verb!()! & +\verb!)! \\ +% +\verb!*! & +\verb!+! & +\verb!++! & +\verb!,! & +\verb!-! & +\verb!->! & +\verb!.! \\ +% +\verb!.(! & +\verb!..! & +\verb!/! & +\verb!/\! & +\verb!:! & +\verb!::! & +\verb!:! & +\verb!;! & +\verb!! & +\verb!<:! \\ +% +\verb!<=! & +\verb!<>! & +\verb!=! & +\verb!=>! & +\verb!=_D! & +\verb!>! & +\verb!>->! \\ +% +\verb!>=! & +\verb!?! & +\verb!?=! & +\verb!@! & +\verb![! & +\verb!\/! & +\verb!]! \\ +% +\verb!^! & +\verb!{! & +\verb!|! & +\verb!|-! & +\verb!||! & +\verb!}! & +\verb!~! \\ +\end{tabular} +\end{center} + +Lexical ambiguities are resolved according to the ``longest match'' +rule: when a sequence of non alphanumerical characters can be decomposed +into several different ways, then the first token is the longest +possible one (among all tokens defined at this moment), and so on. + +\section{Terms \label{term}\index{Terms}} + +\subsection{Syntax of terms} + +Figures \ref{term-syntax} and \ref{term-syntax-aux} describe the basic +set of terms which form the {\em Calculus of Inductive Constructions} +(also called \CIC). The formal presentation of {\CIC} is given in +chapter \ref{Cic}. Extensions of this syntax are given in chapter +\ref{Gallina-extension}. How to customize the syntax is described in +chapter \ref{Addoc-syntax}. 
+ +\begin{figure}[htbp] +\begin{centerframe} +\begin{tabular}{lcl@{\qquad}r} +{\term} & ::= & + {\tt forall} {\binderlist} {\tt ,} {\term} &(\ref{products})\\ + & $|$ & {\tt fun} {\binderlist} {\tt =>} {\term} &(\ref{abstractions})\\ + & $|$ & {\tt fix} {\fixpointbodies} &(\ref{fixpoints})\\ + & $|$ & {\tt cofix} {\cofixpointbodies} &(\ref{fixpoints})\\ + & $|$ & {\tt let} {\idparams} {\tt :=} {\term} + {\tt in} {\term} &(\ref{let-in})\\ + & $|$ & {\tt let fix} {\fixpointbody} {\tt in} {\term} &(\ref{fixpoints})\\ + & $|$ & {\tt let cofix} {\cofixpointbody} + {\tt in} {\term} &(\ref{fixpoints})\\ + & $|$ & {\tt let} {\tt (} \sequence{\name}{,} {\tt )} \zeroone{\ifitem} + {\tt :=} {\term} + {\tt in} {\term} &(\ref{caseanalysis}, \ref{Mult-match})\\ + & $|$ & {\tt if} {\term} \zeroone{\ifitem} {\tt then} {\term} + {\tt else} {\term} &(\ref{caseanalysis}, \ref{Mult-match})\\ + & $|$ & {\term} {\tt :} {\term} &(\ref{typecast})\\ + & $|$ & {\term} {\tt ->} {\term} &(\ref{products})\\ + & $|$ & {\term} \nelist{\termarg}{}&(\ref{applications})\\ + & $|$ & {\tt @} {\qualid} \sequence{\term}{} + &(\ref{Implicits-explicitation})\\ + & $|$ & {\term} {\tt \%} {\ident} &(\ref{scopechange})\\ + & $|$ & {\tt match} \nelist{\caseitem}{\tt ,} + \zeroone{\returntype} {\tt with} &\\ + && ~~~\zeroone{\zeroone{\tt |} \nelist{\eqn}{|}} {\tt end} + &(\ref{caseanalysis})\\ + & $|$ & {\qualid} &(\ref{qualid})\\ + & $|$ & {\sort} &(\ref{Gallina-sorts})\\ + & $|$ & {\num} &(\ref{numerals})\\ + & $|$ & {\_} &(\ref{hole})\\ + & & &\\ +{\termarg} & ::= & {\term} &\\ + & $|$ & {\tt (} {\ident} {\tt :=} {\term} {\tt )} + &(\ref{Implicits-explicitation})\\ +%% & $|$ & {\tt (} {\num} {\tt :=} {\term} {\tt )} +%% &(\ref{Implicits-explicitation})\\ +&&&\\ +{\binderlist} & ::= & \nelist{\name}{} {\typecstr} & \ref{Binders} \\ + & $|$ & {\binder} \nelist{\binderlet}{} &\\ +&&&\\ +{\binder} & ::= & {\name} & \ref{Binders} \\ + & $|$ & {\tt (} \nelist{\name}{} {\tt :} {\term} {\tt )} &\\ +&&&\\ 
+{\binderlet} & ::= & {\binder} & \ref{Binders} \\ + & $|$ & {\tt (} {\name} {\typecstr} {\tt :=} {\term} {\tt )} &\\ +& & &\\ +{\name} & ::= & {\ident} &\\ + & $|$ & {\tt \_} &\\ +&&&\\ +{\qualid} & ::= & {\ident} & \\ + & $|$ & {\qualid} {\accessident} &\\ + & & &\\ +{\sort} & ::= & {\tt Prop} ~$|$~ {\tt Set} ~$|$~ {\tt Type} & +\end{tabular} +\end{centerframe} +\caption{Syntax of terms} +\label{term-syntax} +\index{term@{\term}} +\index{sort@{\sort}} +\end{figure} + + + +\begin{figure}[htb] +\begin{centerframe} +\begin{tabular}{lcl} +{\idparams} & ::= & {\ident} \sequence{\binderlet}{} {\typecstr} \\ +&&\\ +{\fixpointbodies} & ::= & + {\fixpointbody} \\ + & $|$ & {\fixpointbody} {\tt with} \nelist{\fixpointbody}{{\tt with}} + {\tt for} {\ident} \\ +{\cofixpointbodies} & ::= & + {\cofixpointbody} \\ + & $|$ & {\cofixpointbody} {\tt with} \nelist{\cofixpointbody}{{\tt with}} + {\tt for} {\ident} \\ +&&\\ +{\fixpointbody} & ::= & + {\ident} \nelist{\binderlet}{} \zeroone{\annotation} {\typecstr} + {\tt :=} {\term} \\ +{\cofixpointbody} & ::= & {\idparams} {\tt :=} {\term} \\ + & &\\ +{\annotation} & ::= & {\tt \{ struct} {\ident} {\tt \}} \\ +&&\\ +{\caseitem} & ::= & {\term} \zeroone{{\tt as} \name} + \zeroone{{\tt in} \term} \\ +&&\\ +{\ifitem} & ::= & \zeroone{{\tt as} {\name}} {\returntype} \\ +&&\\ +{\returntype} & ::= & {\tt return} {\term} \\ +&&\\ +{\eqn} & ::= & \nelist{\pattern}{\tt ,} {\tt =>} {\term}\\ +&&\\ +{\pattern} & ::= & {\qualid} \nelist{\pattern}{} \\ + & $|$ & {\pattern} {\tt as} {\ident} \\ + & $|$ & {\pattern} {\tt \%} {\ident} \\ + & $|$ & {\qualid} \\ + & $|$ & {\tt \_} \\ + & $|$ & {\num} \\ + & $|$ & {\tt (} \nelist{\pattern}{,} {\tt )} +\end{tabular} +\end{centerframe} +\caption{Syntax of terms (continued)} +\label{term-syntax-aux} +\end{figure} + + +%%%%%%% + +\subsection{Types} + +{\Coq} terms are typed. {\Coq} types are recognized by the same +syntactic class as {\term}. 
We denote by {\type} the semantic subclass
+of types inside the syntactic class {\term}.
+\index{type@{\type}}
+
+
+\subsection{Qualified identifiers and simple identifiers
+\label{qualid}
+\label{ident}}
+
+{\em Qualified identifiers} ({\qualid}) denote {\em global constants}
+(definitions, lemmas, theorems, remarks or facts), {\em global
+variables} (parameters or axioms), {\em inductive
+types} or {\em constructors of inductive types}.
+{\em Simple identifiers} (or shortly {\ident}) are a
+syntactic subset of qualified identifiers. Identifiers may also
+denote local {\em variables}, which qualified identifiers do not.
+
+\subsection{Numerals
+\label{numerals}}
+
+Numerals have no definite semantics in the calculus. They are mere
+notations that can be bound to objects through the notation mechanism
+(see chapter~\ref{Addoc-syntax} for details). Initially, numerals are
+bound to Peano's representation of natural numbers
+(see~\ref{libnats}).
+
+Note: negative integers are not at the same level as {\num}, for this
+would make precedence unnatural.
+
+\subsection{Sorts
+\index{Sorts}
+\index{Type@{\Type}}
+\index{Set@{\Set}}
+\index{Prop@{\Prop}}
+\index{Sorts}
+\label{Gallina-sorts}}
+
+There are three sorts \Set, \Prop\ and \Type.
+\begin{itemize}
+\item \Prop\ is the universe of {\em logical propositions}.
+The logical propositions themselves are typing the proofs.
+We denote propositions by {\form}. This constitutes a semantic
+subclass of the syntactic class {\term}.
+\index{form@{\form}}
+\item \Set\ is the universe of {\em program
+types} or {\em specifications}.
+The specifications themselves are typing the programs.
+We denote specifications by {\specif}. This constitutes a semantic
+subclass of the syntactic class {\term}.
+\index{specif@{\specif}}
+\item {\Type} is the type of {\Set} and {\Prop}
+\end{itemize}
+\noindent More on sorts can be found in section \ref{Sorts}.
+ +\subsection{Binders +\label{Binders} +\index{binders}} + +Various constructions introduce variables which scope is some of its +sub-expressions. There is a uniform syntax for this. A binder may be +an (unqualified) identifier: the name to use to refer to this +variable. If the variable is not to be used, its name can be {\tt +\_}. When its type cannot be synthesized by the system, it can be +specified with notation {\tt (}\,{\ident}\,{\tt :}\,{\type}\,{\tt +)}. There is a notation for several variables sharing the same type: +{\tt (}\,{\ident$_1$}\ldots{\ident$_n$}\,{\tt :}\,{\type}\,{\tt )}. + +Some constructions allow ``let-binders'', that is either a binder as +defined above, or a variable with a value. The notation is {\tt +(}\,{\ident}\,{\tt :=}\,{\term}\,{\tt )}. Only one variable can be +introduced at the same time. It is also possible to give the type of +the variable before the symbol {\tt :=}. + +The last kind of binders is the ``binder list''. It is either a list +of let-binders (the first one not being a variable with value), or +{\ident$_1$}\ldots{\ident$_n$}\,{\tt :}\,{\type} if all variables +share the same type. + +{\Coq} terms are typed. {\Coq} types are recognized by the same +syntactic class as {\term}. We denote by {\type} the semantic subclass +of types inside the syntactic class {\term}. +\index{type@{\type}} + +\subsection{Abstractions +\label{abstractions} +\index{abstractions}} + +The expression ``{\tt fun} {\ident} {\tt :} \type {\tt =>}~{\term}'' +denotes the {\em abstraction} of the variable {\ident} of type +{\type}, over the term {\term}. Put in another way, it is function of +formal parameter {\ident} of type {\type} returning {\term}. + +Keyword {\tt fun} is followed by a ``binder list'', so any of the +binders of Section~\ref{Binders} apply. Internally, abstractions are +only over one variable. 
Multiple variable binders are an iteration of +the single variable abstraction: notation {\tt +fun}~{\ident$_{1}$}~{\ldots}~{\ident$_{n}$}~{\tt :}~\type~{\tt +=>}~{\term} stands for {\tt fun}~{\ident$_{1}$}~{\tt :}~\type~{\tt +=>}~{\ldots}~{\tt fun}~{\ident$_{n}$}~{\tt :}~\type~{\tt =>}~{\term}. +Variables with a value expand to a local definition (see +Section~\ref{let-in}). + +\subsection{Products +\label{products} +\index{products}} + +The expression ``{\tt forall}~{\ident}~{\tt :}~\type~{\tt ,}~{\term}'' +denotes the {\em product} of the variable {\ident} of type {\type}, +over the term {\term}. As for abstractions, {\tt forall} is followed +by a binder list, and it is represented by an iteration of single +variable products. + +Non dependent product types have a special notation ``$A$ {\tt ->} +$B$'' stands for ``{\tt forall \_:}$A${\tt ,}~$B$''. This is to stress +on the fact that non dependent product types are usual functional types. + +\subsection{Applications +\label{applications} +\index{applications}} + +The expression \term$_0$ \term$_1$ denotes the application of + term \term$_0$ to \term$_1$. + +The expression {\tt }\term$_0$ \term$_1$ ... \term$_n${\tt} +denotes the application of the term \term$_0$ to the arguments +\term$_1$ ... then \term$_n$. It is equivalent to {\tt } {\ldots} +{\tt (} {\term$_0$} {\term$_1$} {\tt )} {\ldots} {\term$_n$} {\tt }: +associativity is to the left. + +When using implicit arguments mechanism, implicit positions can be +forced a value with notation {\tt (}\,{\ident}\,{\tt +:=}\,{\term}\,{\tt )} or {\tt (}\,{\num}\,{\tt +:=}\,{\term}\,{\tt )}. See Section~\ref{Implicits-explicitation} for +details. + +\subsection{Type cast +\label{typecast} +\index{Cast}} + +The expression ``{\term}~{\tt :}~{\type}'' is a type cast +expression. It enforces the type of {\term} to be {\type}. 
+
+\subsection{Inferable subterms
+\label{hole}
+\index{\_}}
+
+Since there are redundancies, a term can be type-checked without
+giving it in totality. Subterms that are left to guess by the
+type-checker are replaced by ``\_''.
+
+
+\subsection{Local definitions (let-in)
+\label{let-in}
+\index{Local definitions}
+\index{let-in}}
+
+
+{\tt let}~{\ident}~{\tt :=}~{\term$_1$}~{\tt in}~{\term$_2$} denotes
+the local binding of \term$_1$ to the variable $\ident$ in
+\term$_2$.
+
+There is a syntactic sugar for local definition of functions: {\tt
+let} {\ident} {\binder$_1$} \ldots {\binder$_n$} {\tt :=} {\term$_1$}
+{\tt in} {\term$_2$} stands for {\tt let} {\ident} {\tt := fun}
+{\binder$_1$} \ldots {\binder$_n$} {\tt =>} {\term$_1$} {\tt in} {\term$_2$}.
+
+
+\subsection{Definition by case analysis
+\label{caseanalysis}
+\index{match@{\tt match\ldots with\ldots end}}}
+
+
+This paragraph only shows simple variants of case analysis. See
+Section~\ref{Mult-match} and Chapter~\ref{Mult-match-full} for
+explanations of the general form.
+
+Objects of inductive types can be destructured by a case-analysis
+construction, also called pattern-matching in functional languages. In
+its simple form, a case analysis expression is used to analyze the
+structure of an inductive object (upon which constructor it is
+built).
+
+The expression {\tt match} {\term$_0$} {\returntype} {\tt with}
+{\pattern$_1$} {\tt =>} {\term$_1$} {\tt $|$} {\ldots} {\tt $|$}
+{\pattern$_n$} {\tt =>} {\term$_n$} {\tt end}, denotes a {\em
+pattern-matching} over the term {\term$_0$} (expected to be of an
+inductive type $I$). {\term$_1$}\ldots{\term$_n$} are called branches. In
+a simple pattern \qualid~\nelist{\ident}{}, the qualified identifier
+{\qualid} is intended to
+be a constructor. There should be a branch for every constructor of
+$I$.
+
+The {\returntype} is used to compute the resulting type of the whole
+{\tt match} expression and the type of the branches.
Most of the time,
+when this type is the same as the types of all the {\term$_i$}, the
+annotation is not needed\footnote{except if no equation is given, to
+match the term in an empty type, e.g. the type \texttt{False}}. This
+annotation has to be given when the resulting type of the whole {\tt
+match} depends on the actual {\term$_0$} matched.
+
+There are specific notations for case analysis on types with one or
+two constructors: {\tt if / then / else} and
+{\tt let (}\ldots{\tt ) :=} \ldots {\tt in}\ldots.
+
+
+\SeeAlso Section~\ref{Mult-match} for details and examples.
+
+\subsection{Recursive functions
+\label{fixpoints}
+\index{fix@{fix \ident$_i$\{\dots\}}}}
+
+Expression ``{\tt fix} \ident$_1$ \binder$_1$ {\tt :} {\type$_1$}
+\texttt{:=} \term$_1$ {\tt with} {\ldots} {\tt with} \ident$_n$
+\binder$_n$~{\tt :} {\type$_n$} \texttt{:=} \term$_n$ {\tt for}
+{\ident$_i$}'' denotes the $i$th component of a block of functions
+defined by mutual well-founded recursion. It is the local counterpart
+of the {\tt Fixpoint} command. See Section~\ref{Fixpoint} for more
+details. When $n=1$, the {\tt for}~{\ident$_i$} is omitted.
+
+The expression ``{\tt cofix} \ident$_1$~\binder$_1$ {\tt :} {\type$_1$}
+\texttt{:=} \term$_1$ {\tt with} {\ldots} {\tt with} \ident$_n$ \binder$_n$
+{\tt :} {\type$_n$} \texttt{:=} \term$_n$~{\tt for} {\ident$_i$}'' denotes
+the $i$th component of a block of terms defined by a mutual guarded
+co-recursion. It is the local counterpart of the {\tt CoFixpoint}
+command. See Section~\ref{CoFixpoint} for more details. When $n=1$,
+the {\tt for}~{\ident$_i$} is omitted.
+
+The association of a single fixpoint and a local
+definition has a special syntax: ``{\tt let fix}~$f$~{\ldots}~{\tt
+ :=}~{\ldots}~{\tt in}~{\ldots}'' stands for ``{\tt let}~$f$~{\tt :=
+ fix}~$f$~\ldots~{\tt :=}~{\ldots}~{\tt in}~{\ldots}''. The same
+ applies for co-fixpoints.
+ + +\section{The Vernacular +\label{Vernacular}} + +\begin{figure}[tbp] +\begin{centerframe} +\begin{tabular}{lcl} +{\sentence} & ::= & {\declaration} \\ + & $|$ & {\definition} \\ + & $|$ & {\inductive} \\ + & $|$ & {\fixpoint} \\ + & $|$ & {\statement} \zeroone{\proof} \\ +&&\\ +%% Declarations +{\declaration} & ::= & {\declarationkeyword} {\assums} {\tt .} \\ +&&\\ +{\declarationkeyword} & ::= & {\tt Axiom} $|$ {\tt Conjecture} \\ + & $|$ & {\tt Parameter} $|$ {\tt Parameters} \\ + & $|$ & {\tt Variable} $|$ {\tt Variables} \\ + & $|$ & {\tt Hypothesis} $|$ {\tt Hypotheses}\\ +&&\\ +{\assums} & ::= & \nelist{\ident}{} {\tt :} {\term} \\ + & $|$ & \nelist{\binder}{} \\ +&&\\ +%% Definitions +{\definition} & ::= & + {\tt Definition} {\idparams} {\tt :=} {\term} {\tt .} \\ + & $|$ & {\tt Let} {\idparams} {\tt :=} {\term} {\tt .} \\ +&&\\ +%% Inductives +{\inductive} & ::= & + {\tt Inductive} \nelist{\inductivebody}{with} {\tt .} \\ + & $|$ & {\tt CoInductive} \nelist{\inductivebody}{with} {\tt .} \\ + & & \\ +{\inductivebody} & ::= & + {\ident} \sequence{\binderlet}{} {\tt :} {\term} {\tt :=} \\ + && ~~~\zeroone{\zeroone{\tt |} \nelist{\idparams}{|}} \\ + & & \\ %% TODO: where ... +%% Fixpoints +{\fixpoint} & ::= & {\tt Fixpoint} \nelist{\fixpointbody}{with} {\tt .} \\ + & $|$ & {\tt CoFixpoint} \nelist{\cofixpointbody}{with} {\tt .} \\ +&&\\ +%% Lemmas & proofs +{\statement} & ::= & + {\statkwd} {\ident} \sequence{\binderlet}{} {\tt :} {\term} {\tt .} \\ +&&\\ + {\statkwd} & ::= & {\tt Theorem} $|$ {\tt Lemma} $|$ {\tt Definition} \\ +&&\\ +{\proof} & ::= & {\tt Proof} {\tt .} {\dots} {\tt Qed} {\tt .}\\ + & $|$ & {\tt Proof} {\tt .} {\dots} {\tt Defined} {\tt .}\\ + & $|$ & {\tt Proof} {\tt .} {\dots} {\tt Admitted} {\tt .} +\end{tabular} +\end{centerframe} +\caption{Syntax of sentences} +\label{sentences-syntax} +\end{figure} + +Figure \ref{sentences-syntax} describes {\em The Vernacular} which is the +language of commands of \gallina. 
A sentence of the vernacular +language, like in many natural languages, begins with a capital letter +and ends with a dot. + +The different kinds of command are described hereafter. They all suppose +that the terms occurring in the sentences are well-typed. + +%% +%% Axioms and Parameters +%% +\subsection{Declarations +\index{Declarations} +\label{Declarations}} + +The declaration mechanism allows the user to specify his own basic +objects. Declared objects play the role of axioms or parameters in +mathematics. A declared object is an {\ident} associated to a \term. A +declaration is accepted by {\Coq} if and only if this {\term} is a +correct type in the current context of the declaration and \ident\ was +not previously defined in the same module. This {\term} is considered +to be the type, or specification, of the \ident. + +\subsubsection{{\tt Axiom {\ident} :{\term} .} +\comindex{Axiom} +\comindex{Parameter}\comindex{Parameters} +\comindex{Conjecture} +\label{Axiom}} + +This command links {\term} to the name {\ident} as its specification +in the global context. The fact asserted by {\term} is thus assumed as +a postulate. + +\begin{ErrMsgs} +\item \errindex{{\ident} already exists} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Parameter {\ident} :{\term}.} \\ + Is equivalent to {\tt Axiom {\ident} : {\term}} + +\item {\tt Parameter {\ident$_1$}\ldots{\ident$_n$} {\tt :}{\term}.}\\ + Adds $n$ parameters with specification {\term} + +\item + {\tt Parameter\,% +(\,{\ident$_{1,1}$}\ldots{\ident$_{1,k_1}$}\,{\tt :}\,{\term$_1$} {\tt )}\,% +\ldots\,{\tt (}\,{\ident$_{n,1}$}\ldots{\ident$_{n,k_n}$}\,{\tt :}\,% +{\term$_n$} {\tt )}.}\\ + Adds $n$ blocks of parameters with different specifications. + +\item {\tt Conjecture {\ident} :{\term}.}\\ + Is equivalent to {\tt Axiom {\ident} : {\term}}. +\end{Variants} + +\noindent {\bf Remark: } It is possible to replace {\tt Parameter} by +{\tt Parameters}. + + +\subsubsection{{\tt Variable {\ident} :{\term}}. 
+\comindex{Variable} +\comindex{Variables} +\comindex{Hypothesis} +\comindex{Hypotheses}} + +This command links {\term} to the name {\ident} in the context of the +current section (see Section~\ref{Section} for a description of the section +mechanism). When the current section is closed, name {\ident} will be +unknown and every object using this variable will be explicitly +parameterized (the variable is {\em discharged}). Using the {\tt +Variable} command out of any section is equivalent to {\tt Axiom}. + +\begin{ErrMsgs} +\item \errindex{{\ident} already exists} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Variable {\ident$_1$}\ldots{\ident$_n$} {\tt :}{\term}.}\\ + Links {\term} to names {\ident$_1$}\ldots{\ident$_n$}. +\item + {\tt Variable\,% +(\,{\ident$_{1,1}$}\ldots{\ident$_{1,k_1}$}\,{\tt :}\,{\term$_1$} {\tt )}\,% +\ldots\,{\tt (}\,{\ident$_{n,1}$}\ldots{\ident$_{n,k_n}$}\,{\tt :}\,% +{\term$_n$} {\tt )}.}\\ + Adds $n$ blocks of variables with different specifications. +\item {\tt Hypothesis {\ident} {\tt :}{\term}.} \\ + \texttt{Hypothesis} is a synonymous of \texttt{Variable} +\end{Variants} + +\noindent {\bf Remark: } It is possible to replace {\tt Variable} by +{\tt Variables} and {\tt Hypothesis} by {\tt Hypotheses}. + +It is advised to use the keywords \verb:Axiom: and \verb:Hypothesis: +for logical postulates (i.e. when the assertion {\term} is of sort +\verb:Prop:), and to use the keywords \verb:Parameter: and +\verb:Variable: in other cases (corresponding to the declaration of an +abstract mathematical entity). + +%% +%% Definitions +%% +\subsection{Definitions +\index{Definitions} +\label{Simpl-definitions}} + +Definitions differ from declarations in allowing to give a name to a +term whereas declarations were just giving a type to a name. That is +to say that the name of a defined object can be replaced at any time +by its definition. 
This replacement is called +$\delta$-conversion\index{delta-reduction@$\delta$-reduction} (see +Section~\ref{delta}). A defined object is accepted by the system if +and only if the defining term is well-typed in the current context of +the definition. Then the type of the name is the type of term. The +defined name is called a {\em constant}\index{Constant} and one says +that {\it the constant is added to the +environment}\index{Environment}. + +A formal presentation of constants and environments is given in +Section~\ref{Typed-terms}. + +\subsubsection{\tt Definition {\ident} := {\term}. +\comindex{Definition}} + +This command binds the value {\term} to the name {\ident} in the +environment, provided that {\term} is well-typed. + +\begin{ErrMsgs} +\item \errindex{{\ident} already exists} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Definition {\ident} {\tt :}{\term$_1$} := {\term$_2$}.}\\ + It checks that the type of {\term$_2$} is definitionally equal to + {\term$_1$}, and registers {\ident} as being of type {\term$_1$}, + and bound to value {\term$_2$}. +\item {\tt Definition {\ident} {\binder$_1$}\ldots{\binder$_n$} + {\tt :}\term$_1$ {\tt :=} {\term$_2$}.}\\ + This is equivalent to \\ + {\tt Definition\,{\ident}\,{\tt :\,forall}\,% + {\binder$_1$}\ldots{\binder$_n$}{\tt ,}\,\term$_1$\,{\tt :=}}\,% + {\tt fun}\,{\binder$_1$}\ldots{\binder$_n$}\,{\tt =>}\,{\term$_2$}\,% + {\tt .} +\end{Variants} + +\begin{ErrMsgs} +\item \errindex{In environment {\dots} the term: {\term$_2$} does not have type + {\term$_1$}}.\\ + \texttt{Actually, it has type {\term$_3$}}. +\end{ErrMsgs} + +\SeeAlso Sections \ref{Opaque}, \ref{Transparent}, \ref{unfold} + +\subsubsection{\tt Let {\ident} := {\term}. +\comindex{Let}} + +This command binds the value {\term} to the name {\ident} in the +environment of the current section. 
The name {\ident} disappears +when the current section is eventually closed, and, all +persistent objects (such as theorems) defined within the +section and depending on {\ident} are prefixed by the local definition +{\tt let {\ident} := {\term} in}. + +\begin{ErrMsgs} +\item \errindex{{\ident} already exists} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Let {\ident} : {\term$_1$} := {\term$_2$}.} +\end{Variants} + +\SeeAlso Sections \ref{Section} (section mechanism), \ref{Opaque}, +\ref{Transparent} (opaque/transparent constants), \ref{unfold} + +%% +%% Inductive Types +%% +\subsection{Inductive definitions +\index{Inductive definitions} +\label{gal_Inductive_Definitions} +\comindex{Inductive} +\label{Inductive}} + +We gradually explain simple inductive types, simple +annotated inductive types, simple parametric inductive types, +mutually inductive types. We explain also co-inductive types. + +\subsubsection{Simple inductive types} + +The definition of a simple inductive type has the following form: + +\medskip +{\tt +\begin{tabular}{l} +Inductive {\ident} : {\sort} := \\ +\begin{tabular}{clcl} + & {\ident$_1$} &:& {\type$_1$} \\ + | & {\ldots} && \\ + | & {\ident$_n$} &:& {\type$_n$} +\end{tabular} +\end{tabular} +} +\medskip + +The name {\ident} is the name of the inductively defined type and +{\sort} is the universes where it lives. +The names {\ident$_1$}, {\ldots}, {\ident$_n$} +are the names of its constructors and {\type$_1$}, {\ldots}, +{\type$_n$} their respective types. The types of the constructors have +to satisfy a {\em positivity condition} (see Section~\ref{Positivity}) +for {\ident}. This condition ensures the soundness of the inductive +definition. If this is the case, the constants {\ident}, +{\ident$_1$}, {\ldots}, {\ident$_n$} are added to the environment with +their respective types. Accordingly to the universe where +the inductive type lives ({\it e.g.} its type {\sort}), {\Coq} provides a +number of destructors for {\ident}. 
Destructors are named +{\ident}{\tt\_ind}, {\ident}{\tt \_rec} or {\ident}{\tt \_rect} which +respectively correspond to elimination principles on {\tt Prop}, {\tt +Set} and {\tt Type}. The type of the destructors expresses structural +induction/recursion principles over objects of {\ident}. We give below +two examples of the use of the {\tt Inductive} definitions. + +The set of natural numbers is defined as: +\begin{coq_example} +Inductive nat : Set := + | O : nat + | S : nat -> nat. +\end{coq_example} + +The type {\tt nat} is defined as the least \verb:Set: containing {\tt + O} and closed by the {\tt S} constructor. The constants {\tt nat}, +{\tt O} and {\tt S} are added to the environment. + +Now let us have a look at the elimination principles. They are three : +{\tt nat\_ind}, {\tt nat\_rec} and {\tt nat\_rect}. The type of {\tt + nat\_ind} is: +\begin{coq_example} +Check nat_ind. +\end{coq_example} + +This is the well known structural induction principle over natural +numbers, i.e. the second-order form of Peano's induction principle. +It allows to prove some universal property of natural numbers ({\tt +forall n:nat, P n}) by induction on {\tt n}. + +The types of {\tt nat\_rec} and {\tt nat\_rect} are similar, except +that they pertain to {\tt (P:nat->Set)} and {\tt (P:nat->Type)} +respectively . They correspond to primitive induction principles +(allowing dependent types) respectively over sorts \verb:Set: and +\verb:Type:. The constant {\ident}{\tt \_ind} is always provided, +whereas {\ident}{\tt \_rec} and {\ident}{\tt \_rect} can be impossible +to derive (for example, when {\ident} is a proposition). + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{Variants} +\item +\begin{coq_example*} +Inductive nat : Set := O | S (_:nat). 
+\end{coq_example*} +In the case where inductive types have no annotations (next section +gives an example of such annotations), the positivity condition +implies that a constructor can be defined by only giving the type of +its arguments. +\end{Variants} + +\subsubsection{Simple annotated inductive types} + +In an annotated inductive types, the universe where the inductive +type is defined is no longer a simple sort, but what is called an +arity, which is a type whose conclusion is a sort. + +As an example of annotated inductive types, let us define the +$even$ predicate: + +\begin{coq_example} +Inductive even : nat -> Prop := + | even_0 : even O + | even_SS : forall n:nat, even n -> even (S (S n)). +\end{coq_example} + +The type {\tt nat->Prop} means that {\tt even} is a unary predicate +(inductively defined) over natural numbers. The type of its two +constructors are the defining clauses of the predicate {\tt even}. The +type of {\tt even\_ind} is: + +\begin{coq_example} +Check even_ind. +\end{coq_example} + +From a mathematical point of view it asserts that the natural numbers +satisfying the predicate {\tt even} are exactly the naturals satisfying +the clauses {\tt even\_0} or {\tt even\_SS}. This is why, when we want +to prove any predicate {\tt P} over elements of {\tt even}, it is +enough to prove it for {\tt O} and to prove that if any natural number +{\tt n} satisfies {\tt P} its double successor {\tt (S (S n))} +satisfies also {\tt P}. This is indeed analogous to the structural +induction principle we got for {\tt nat}. + +\begin{ErrMsgs} +\item \errindex{Non strictly positive occurrence of {\ident} in {\type}} +\item \errindex{The conclusion of {\type} is not valid; it must be +built from {\ident}} +\end{ErrMsgs} + +\subsubsection{Parameterized inductive types} + +Inductive types may be parameterized. 
Parameters differ from inductive +type annotations in the fact that recursive invokations of inductive +types must always be done with the same values of parameters as its +specification. + +The general scheme is: +\begin{center} +{\tt Inductive} {\ident} {\binder$_1$}\ldots{\binder$_k$} : {\term} := + {\ident$_1$}: {\term$_1$} | {\ldots} | {\ident$_n$}: \term$_n$ +{\tt .} +\end{center} + +A typical example is the definition of polymorphic lists: +\begin{coq_example*} +Inductive list (A:Set) : Set := + | nil : list A + | cons : A -> list A -> list A. +\end{coq_example*} + +Note that in the type of {\tt nil} and {\tt cons}, we write {\tt + (list A)} and not just {\tt list}.\\ The constants {\tt nil} and +{\tt cons} will have respectively types: + +\begin{coq_example} +Check nil. +Check cons. +\end{coq_example} + +Types of destructors are also quantified with {\tt (A:Set)}. + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{Variants} +\item +\begin{coq_example*} +Inductive list (A:Set) : Set := nil | cons (_:A) (_:list A). +\end{coq_example*} +This is an alternative definition of lists where we specify the +arguments of the constructors rather than their full type. +\end{Variants} + +\begin{ErrMsgs} +\item \errindex{The {\num}th argument of {\ident} must be {\ident'} in {\type}} +\end{ErrMsgs} + +\SeeAlso Sections~\ref{Cic-inductive-definitions} and~\ref{elim}. + + +\subsubsection{Mutually defined inductive types +\comindex{Mutual Inductive} +\label{Mutual-Inductive}} + +The definition of a block of mutually inductive types has the form: + +\medskip +{\tt +\begin{tabular}{l} +Inductive {\ident$_1$} : {\type$_1$} := \\ +\begin{tabular}{clcl} + & {\ident$_1^1$} &:& {\type$_1^1$} \\ + | & {\ldots} && \\ + | & {\ident$_{n_1}^1$} &:& {\type$_{n_1}^1$} +\end{tabular} \\ +with\\ +~{\ldots} \\ +with {\ident$_m$} : {\type$_m$} := \\ +\begin{tabular}{clcl} + & {\ident$_1^m$} &:& {\type$_1^m$} \\ + | & {\ldots} \\ + | & {\ident$_{n_m}^m$} &:& {\type$_{n_m}^m$}. 
+\end{tabular} +\end{tabular} +} +\medskip + +\noindent It has the same semantics as the above {\tt Inductive} +definition for each \ident$_1$, {\ldots}, \ident$_m$. All names +\ident$_1$, {\ldots}, \ident$_m$ and \ident$_1^1$, \dots, +\ident$_{n_m}^m$ are simultaneously added to the environment. Then +well-typing of constructors can be checked. Each one of the +\ident$_1$, {\ldots}, \ident$_m$ can be used on its own. + +It is also possible to parameterize these inductive definitions. +However, parameters correspond to a local +context in which the whole set of inductive declarations is done. For +this reason, the parameters must be strictly the same for each +inductive types The extended syntax is: + +\medskip +{\tt +Inductive {{\ident$_1$} {\params} : {\type$_1$} := \\ +\mbox{}\hspace{0.4cm} {\ident$_1^1$} : {\type$_1^1$} \\ +\mbox{}\hspace{0.1cm}| .. \\ +\mbox{}\hspace{0.1cm}| {\ident$_{n_1}^1$} : {\type$_{n_1}^1$} \\ +with\\ +\mbox{}\hspace{0.1cm} .. \\ +with {\ident$_m$} {\params} : {\type$_m$} := \\ +\mbox{}\hspace{0.4cm}{\ident$_1^m$} : {\type$_1^m$} \\ +\mbox{}\hspace{0.1cm}| .. \\ +\mbox{}\hspace{0.1cm}| {\ident$_{n_m}^m$} : {\type$_{n_m}^m$}. +}} +\medskip + +\Example +The typical example of a mutual inductive data type is the one for +trees and forests. We assume given two types $A$ and $B$ as variables. +It can be declared the following way. + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{coq_example*} +Variables A B : Set. +Inductive tree : Set := + node : A -> forest -> tree +with forest : Set := + | leaf : B -> forest + | cons : tree -> forest -> forest. +\end{coq_example*} + +This declaration generates automatically six induction +principles. They are respectively +called {\tt tree\_rec}, {\tt tree\_ind}, {\tt + tree\_rect}, {\tt forest\_rec}, {\tt forest\_ind}, {\tt + forest\_rect}. These ones are not the most general ones but are +just the induction principles corresponding to each inductive part +seen as a single inductive definition. 
+ +To illustrate this point on our example, we give the types of {\tt + tree\_rec} and {\tt forest\_rec}. + +\begin{coq_example} +Check tree_rec. +Check forest_rec. +\end{coq_example} + +Assume we want to parameterize our mutual inductive definitions with +the two type variables $A$ and $B$, the declaration should be done the +following way: + +\begin{coq_eval} +Reset tree. +\end{coq_eval} +\begin{coq_example*} +Inductive tree (A B:Set) : Set := + node : A -> forest A B -> tree A B +with forest (A B:Set) : Set := + | leaf : B -> forest A B + | cons : tree A B -> forest A B -> forest A B. +\end{coq_example*} + +Assume we define an inductive definition inside a section. When the +section is closed, the variables declared in the section and occurring +free in the declaration are added as parameters to the inductive +definition. + +\SeeAlso Section~\ref{Section} + +\subsubsection{Co-inductive types +\label{CoInductiveTypes} +\comindex{CoInductive}} + +The objects of an inductive type are well-founded with respect to the +constructors of the type. In other words, such objects contain only a +{\it finite} number constructors. Co-inductive types arise from +relaxing this condition, and admitting types whose objects contain an +infinity of constructors. Infinite objects are introduced by a +non-ending (but effective) process of construction, defined in terms +of the constructors of the type. + +An example of a co-inductive type is the type of infinite sequences of +natural numbers, usually called streams. It can be introduced in \Coq\ +using the \texttt{CoInductive} command: +\begin{coq_example} +CoInductive Stream : Set := + Seq : nat -> Stream -> Stream. +\end{coq_example} + +The syntax of this command is the same as the command \texttt{Inductive} +(cf. Section~\ref{gal_Inductive_Definitions}). Notice that no +principle of induction is derived from the definition of a +co-inductive type, since such principles only make sense for inductive +ones. 
For co-inductive ones, the only elimination principle is case
+analysis. For example, the usual destructors on streams
+\texttt{hd:Stream->nat} and \texttt{tl:Stream->Stream} can be defined
+as follows:
+\begin{coq_example}
+Definition hd (x:Stream) := let (a,s) := x in a.
+Definition tl (x:Stream) := let (a,s) := x in s.
+\end{coq_example}
+
+Definition of co-inductive predicates and blocks of mutually
+co-inductive definitions are also allowed. An example of a
+co-inductive predicate is the extensional equality on streams:
+
+\begin{coq_example}
+CoInductive EqSt : Stream -> Stream -> Prop :=
+ eqst :
+ forall s1 s2:Stream,
+ hd s1 = hd s2 -> EqSt (tl s1) (tl s2) -> EqSt s1 s2.
+\end{coq_example}
+
+In order to prove the extensional equality of two streams $s_1$ and
+$s_2$ we have to construct an infinite proof of equality, that is,
+an infinite object of type $(\texttt{EqSt}\;s_1\;s_2)$. We will see
+how to introduce infinite objects in Section~\ref{CoFixpoint}.
+
+%%
+%% (Co-)Fixpoints
+%%
+\subsection{Definition of recursive functions}
+
+\subsubsection{\tt Fixpoint {\ident} {\params} {\tt \{struct}
+ \ident$_0$ {\tt \}} : \type$_0$ := \term$_0$
+\comindex{Fixpoint}
+\label{Fixpoint}}
+
+This command allows to define inductive objects using a fixed point
+construction. The meaning of this declaration is to define {\it ident}
+a recursive function with arguments specified by
+{\binder$_1$}\ldots{\binder$_n$} such that {\it ident} applied to
+arguments corresponding to these binders has type \type$_0$, and is
+equivalent to the expression \term$_0$. The type of the {\ident} is
+consequently {\tt forall {\params} {\tt,} \type$_0$}
+and the value is equivalent to {\tt fun {\params} {\tt =>} \term$_0$}.
+
+To be accepted, a {\tt Fixpoint} definition has to satisfy some
+syntactical constraints on a special argument called the decreasing
+argument. They are needed to ensure that the {\tt Fixpoint} definition
+always terminates.
The point of the {\tt \{struct \ident {\tt \}}} +annotation is to let the user tell the system which argument decreases +along the recursive calls. This annotation may be left implicit for +fixpoints with one argument. For instance, one can define the addition +function as : + +\begin{coq_example} +Fixpoint add (n m:nat) {struct n} : nat := + match n with + | O => m + | S p => S (add p m) + end. +\end{coq_example} + +The {\tt match} operator matches a value (here \verb:n:) with the +various constructors of its (inductive) type. The remaining arguments +give the respective values to be returned, as functions of the +parameters of the corresponding constructor. Thus here when \verb:n: +equals \verb:O: we return \verb:m:, and when \verb:n: equals +\verb:(S p): we return \verb:(S (add p m)):. + +The {\tt match} operator is formally described +in detail in Section~\ref{Caseexpr}. The system recognizes that in +the inductive call {\tt (add p m)} the first argument actually +decreases because it is a {\em pattern variable} coming from {\tt match + n with}. + +\Example The following definition is not correct and generates an +error message: + +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is not correct and should produce **********) +(********* Error: Recursive call to wrongplus ... **********) +\end{coq_eval} +\begin{coq_example} +Fixpoint wrongplus (n m:nat) {struct n} : nat := + match m with + | O => n + | S p => S (wrongplus n p) + end. +\end{coq_example} + +because the declared decreasing argument {\tt n} actually does not +decrease in the recursive call. The function computing the addition +over the second argument should rather be written: + +\begin{coq_example*} +Fixpoint plus (n m:nat) {struct m} : nat := + match m with + | O => n + | S p => S (plus n p) + end. +\end{coq_example*} + +The ordinary match operation on natural numbers can be mimicked in the +following way. 
+\begin{coq_example*} +Fixpoint nat_match + (C:Set) (f0:C) (fS:nat -> C -> C) (n:nat) {struct n} : C := + match n with + | O => f0 + | S p => fS p (nat_match C f0 fS p) + end. +\end{coq_example*} +The recursive call may not only be on direct subterms of the recursive +variable {\tt n} but also on a deeper subterm and we can directly +write the function {\tt mod2} which gives the remainder modulo 2 of a +natural number. +\begin{coq_example*} +Fixpoint mod2 (n:nat) : nat := + match n with + | O => O + | S p => match p with + | O => S O + | S q => mod2 q + end + end. +\end{coq_example*} +In order to keep the strong normalisation property, the fixed point +reduction will only be performed when the argument in position of the +decreasing argument (which type should be in an inductive definition) +starts with a constructor. + +The {\tt Fixpoint} construction enjoys also the {\tt with} extension +to define functions over mutually defined inductive types or more +generally any mutually recursive definitions. + +\begin{Variants} +\item {\tt Fixpoint {\ident$_1$} {\params$_1$} :{\type$_1$} := {\term$_1$}\\ + with {\ldots} \\ + with {\ident$_m$} {\params$_m$} :{\type$_m$} := {\type$_m$}}\\ + Allows to define simultaneously {\ident$_1$}, {\ldots}, + {\ident$_m$}. +\end{Variants} + +\Example +The size of trees and forests can be defined the following way: +\begin{coq_eval} +Reset Initial. +Variables A B : Set. +Inductive tree : Set := + node : A -> forest -> tree +with forest : Set := + | leaf : B -> forest + | cons : tree -> forest -> forest. +\end{coq_eval} +\begin{coq_example*} +Fixpoint tree_size (t:tree) : nat := + match t with + | node a f => S (forest_size f) + end + with forest_size (f:forest) : nat := + match f with + | leaf b => 1 + | cons t f' => (tree_size t + forest_size f') + end. +\end{coq_example*} +A generic command {\tt Scheme} is useful to build automatically various +mutual induction principles. It is described in Section~\ref{Scheme}. 
+ +\subsubsection{\tt CoFixpoint {\ident} : \type$_0$ := \term$_0$. +\comindex{CoFixpoint} +\label{CoFixpoint}} + +The {\tt CoFixpoint} command introduces a method for constructing an +infinite object of a coinduc\-tive type. For example, the stream +containing all natural numbers can be introduced applying the +following method to the number \texttt{O} (see +Section~\ref{CoInductiveTypes} for the definition of {\tt Stream}, +{\tt hd} and {\tt tl}): +\begin{coq_eval} +Reset Initial. +CoInductive Stream : Set := + Seq : nat -> Stream -> Stream. +Definition hd (x:Stream) := match x with + | Seq a s => a + end. +Definition tl (x:Stream) := match x with + | Seq a s => s + end. +\end{coq_eval} +\begin{coq_example} +CoFixpoint from (n:nat) : Stream := Seq n (from (S n)). +\end{coq_example} + +Oppositely to recursive ones, there is no decreasing argument in a +co-recursive definition. To be admissible, a method of construction +must provide at least one extra constructor of the infinite object for +each iteration. A syntactical guard condition is imposed on +co-recursive definitions in order to ensure this: each recursive call +in the definition must be protected by at least one constructor, and +only by constructors. That is the case in the former definition, where +the single recursive call of \texttt{from} is guarded by an +application of \texttt{Seq}. On the contrary, the following recursive +function does not satisfy the guard condition: + +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is not correct and should produce **********) +(***************** Error: Unguarded recursive call *******************) +\end{coq_eval} +\begin{coq_example} +CoFixpoint filter (p:nat -> bool) (s:Stream) : Stream := + if p (hd s) then Seq (hd s) (filter p (tl s)) else filter p (tl s). +\end{coq_example} + +The elimination of co-recursive definition is done lazily, i.e. 
the +definition is expanded only when it occurs at the head of an +application which is the argument of a case analysis expression. In +any other context, it is considered as a canonical expression which is +completely evaluated. We can test this using the command +\texttt{Eval}, which computes the normal forms of a term: + +\begin{coq_example} +Eval compute in (from 0). +Eval compute in (hd (from 0)). +Eval compute in (tl (from 0)). +\end{coq_example} + +\begin{Variants} +\item{\tt CoFixpoint {\ident$_1$} {\params} :{\type$_1$} := + {\term$_1$}}\\ As for most constructions, arguments of co-fixpoints + expressions can be introduced before the {\tt :=} sign. +\item{\tt CoFixpoint {\ident$_1$} :{\type$_1$} := {\term$_1$}\\ + with\\ + \mbox{}\hspace{0.1cm} $\ldots$ \\ + with {\ident$_m$} : {\type$_m$} := {\term$_m$}}\\ +As in the \texttt{Fixpoint} command (cf. Section~\ref{Fixpoint}), it +is possible to introduce a block of mutually dependent methods. +\end{Variants} + +%% +%% Theorems & Lemmas +%% +\subsection{Statement and proofs} + +A statement claims a goal of which the proof is then interactively done +using tactics. More on the proof editing mode, statements and proofs can be +found in chapter \ref{Proof-handling}. + +\subsubsection{\tt Theorem {\ident} : {\type}. +\comindex{Theorem} +\comindex{Lemma} +\comindex{Remark} +\comindex{Fact}} + +This command binds {\type} to the name {\ident} in the +environment, provided that a proof of {\type} is next given. + +After a statement, {\Coq} needs a proof. + +\begin{Variants} +\item {\tt Lemma {\ident} : {\type}.}\\ +It is a synonymous of \texttt{Theorem} +\item {\tt Remark {\ident} : {\type}.}\\ +It is a synonymous of \texttt{Theorem} +% Same as {\tt Theorem} except +% that if this statement is in one or more levels of sections then the +% name {\ident} will be accessible only prefixed by the sections names +% when the sections (see \ref{Section} and \ref{LongNames}) will be +% closed. 
+% %All proofs of persistent objects (such as theorems) referring to {\ident} +% %within the section will be replaced by the proof of {\ident}. + \item {\tt Fact {\ident} : {\type}.}\\ +It is a synonymous of \texttt{Theorem} +% Same as {\tt Remark} except +% that the innermost section name is dropped from the full name. +\item {\tt Definition {\ident} : {\type}.} \\ +Allow to define a term of type {\type} using the proof editing mode. It +behaves as {\tt Theorem} but is intended for the interactive +definition of expression which computational behaviour will be used by +further commands. \SeeAlso~\ref{Transparent} and \ref{unfold}. +\end{Variants} + +\subsubsection{{\tt Proof} {\tt .} \dots {\tt Qed} {\tt .} +\comindex{Proof} +\comindex{Qed} +\comindex{Defined} +\comindex{Save} +\comindex{Goal} +\comindex{Admitted}} + +A proof starts by the keyword {\tt Proof}. Then {\Coq} enters the +proof editing mode until the proof is completed. The proof editing +mode essentially contains tactics that are described in chapter +\ref{Tactics}. Besides tactics, there are commands to manage the proof +editing mode. They are described in chapter \ref{Proof-handling}. When +the proof is completed it should be validated and put in the +environment using the keyword {\tt Qed}. +\medskip + +\ErrMsg +\begin{enumerate} +\item \errindex{{\ident} already exists} +\end{enumerate} + +\begin{Remarks} +\item Several statements can be simultaneously opened. +\item Not only other statements but any vernacular command can be given +within the proof editing mode. In this case, the command is +understood as if it would have been given before the statements still to be +proved. +\item {\tt Proof} is recommended but can currently be omitted. On the +opposite, {\tt Qed} (or {\tt Defined}, see below) is mandatory to validate a proof. +\item Proofs ended by {\tt Qed} are declared opaque (see \ref{Opaque}) +and cannot be unfolded by conversion tactics (see \ref{Conversion-tactics}). 
+To be able to unfold a proof, you should end the proof by {\tt Defined} + (see below). +\end{Remarks} + +\begin{Variants} +\item {\tt Proof} {\tt .} \dots {\tt Defined} {\tt .}\\ + Same as {\tt Proof} {\tt .} \dots {\tt Qed} {\tt .} but the proof is + then declared transparent (see \ref{Transparent}), which means it + can be unfolded in conversion tactics (see \ref{Conversion-tactics}). +\item {\tt Proof} {\tt .} \dots {\tt Save.}\\ + Same as {\tt Proof} {\tt .} \dots {\tt Qed} {\tt .} +\item {\tt Goal} \type \dots {\tt Save} \ident \\ + Same as {\tt Lemma} \ident {\tt :} \type \dots {\tt Save.} + This is intended to be used in the interactive mode. Conversely to named + lemmas, anonymous goals cannot be nested. +\item {\tt Proof.} \dots {\tt Admitted.}\\ + Turns the current conjecture into an axiom and exits editing of + current proof. +\end{Variants} + +% Local Variables: +% mode: LaTeX +% TeX-master: "Reference-Manual" +% End: + +% $Id: RefMan-gal.tex 8606 2006-02-23 13:58:10Z herbelin $ diff --git a/doc/refman/RefMan-ide.tex b/doc/refman/RefMan-ide.tex new file mode 100644 index 00000000..3c73c141 --- /dev/null +++ b/doc/refman/RefMan-ide.tex @@ -0,0 +1,327 @@ +\chapter{\Coq{} Integrated Development Environment} +\label{Addoc-coqide} +\ttindex{coqide} + +The \Coq{} Integrated Development Environment is a graphical tool, to +be used as a user-friendly replacement to \texttt{coqtop}. Its main +purpose is to allow the user to navigate forward and backward into a +\Coq{} vernacular file, executing corresponding commands or undoing +them respectively. % CREDITS ? Proof general, lablgtk, ... + +\CoqIDE{} is run by typing the command \verb|coqide| on the command +line. Without argument, the main screen is displayed with an ``unnamed +buffer'', and with a file name as argument, another buffer displaying +the contents of that file. 
Additionally, coqide accepts the same +options as coqtop, given in Chapter~\ref{Addoc-coqc}, the ones having +obviously no meaning for \CoqIDE{} being ignored. + +\begin{figure}[t] +\begin{center} +%HEVEA\imgsrc{coqide.png} +%BEGIN LATEX +\ifpdf % si on est en pdflatex +\includegraphics[width=1.0\textwidth]{coqide.png} +\else +\includegraphics[width=1.0\textwidth]{coqide.eps} +\fi +%END LATEX +\end{center} +\caption{\CoqIDE{} main screen} +\label{fig:coqide} +\end{figure} + +A sample \CoqIDE{} main screen, while navigating into a file +\verb|Fermat.v|, is shown on Figure~\ref{fig:coqide}. At +the top is a menu bar, and a tool bar below it. The large window on +the left is displaying the various \emph{script buffers}. The upper right +window is the \emph{goal window}, where goals to +prove are displayed. The lower right window is the \emph{message window}, +where various messages resulting from commands are displayed. At the +bottom is the status bar. + +\section{Managing files and buffers, basic edition} + +In the script window, you may open arbitrarily many buffers to +edit. The \emph{File} menu allows you to open files or create some, +save them, print or export them into various formats. Among all these +buffers, there is always one which is the current \emph{running + buffer}, whose name is displayed on a green background, which is the +one where Coq commands are currently executed. + +Buffers may be edited as in any text editor, and classical basic +editing commands (Copy/Paste, \ldots) are available in the \emph{Edit} +menu. \CoqIDE{} offers only basic editing commands, so if you need +more complex editing commands, you may launch your favorite text +editor on the current buffer, using the \emph{Edit/External Editor} +menu. + +\section{Interactive navigation into \Coq{} scripts} + +The running buffer is the one where navigation takes place. The +toolbar proposes five basic commands for this. 
The first one, +represented by a down arrow icon, is for going forward executing one +command. If that command is successful, the part of the script that +has been executed is displayed on a green background. If that command +fails, the error message is displayed in the message window, and the +location of the error is emphasized by a red underline. + +On Figure~\ref{fig:coqide}, the running buffer is \verb|Fermat.v|, all +commands until the \verb|Theorem| have been already executed, and the +user tried to go forward executing \verb|Induction n|. That command +failed because no such tactic exist (tactics are now in +lowercase\ldots), and the wrong word is underlined. + +Notice that the green part of the running buffer is not editable. If +you ever want to modify something you have to go backward using the up +arrow tool, or even better, put the cursor where you want to go back +and use the \textsf{goto} button. Unlike with \verb|coqtop|, you +should never use \verb|Undo| to go backward. + +Two additional tool buttons exist, one to go directly to the end and +one to go back to the beginning. If you try to go to the end, or in +general to run several commands using the \textsf{goto} button, the + execution will stop whenever an error is found. + +If you ever try to execute a command which happens to run during a +long time, and would like to abort it before its +termination, you may use the interrupt button (the white cross on a red circle). + +Finally, notice that these navigation buttons are also available in +the menu, where their keyboard shortcuts are given. + +\section{Try tactics automatically} +\label{sec:trytactics} + +The menu \texttt{Try Tactics} provides some features for automatically +trying to solve the current goal using simple tactics. If such a +tactic succeeds in solving the goal, then its text is automatically +inserted into the script. 
There is finally a combination of these +tactics, called the \emph{proof wizard} which will try each of them in +turn. This wizard is also available as a tool button (the light +bulb). The set of tactics tried by the wizard is customizable in +the preferences. + +These tactics are general ones, in particular they do not refer to +particular hypotheses. You may also try specific tactics related to +the goal or one of the hypotheses, by clicking with the right mouse +button one the goal or the considered hypothesis. This is the +``contextual menu on goals'' feature, that may be disabled in the +preferences if undesirable. + +\section{Vernacular commands, templates} + +The \texttt{Templates} menu allows to use shortcuts to insert +vernacular commands. This is a nice way to proceed if you are not sure +of the spelling of the command you want. + +Moreover, this menu offers some \emph{templates} which will automatic +insert a complex command like Fixpoint with a convenient shape for its +arguments. + +\section{Queries} + +\begin{figure}[t] +\begin{center} +%HEVEA\imgsrc{coqide-queries.png} +%BEGIN LATEX +\ifpdf % si on est en pdflatex +\includegraphics[width=1.0\textwidth]{coqide-queries.png} +\else +\includegraphics[width=1.0\textwidth]{coqide-queries.eps} +\fi +%END LATEX +\end{center} +\caption{\CoqIDE{}: the query window} +\label{fig:querywindow} +\end{figure} + + +We call \emph{query} any vernacular command that do not change the +current state, such as \verb|Check|, \verb|SearchAbout|, etc. Those +commands are of course useless during compilation of a file, hence +should not be included in scripts. To run such commands without +writing them in the script, \CoqIDE{} offers another input window +called the \emph{query window}. This window can be displayed on +demand, either by using the \texttt{Window} menu, or directly using +shortcuts given in the \texttt{Queries} menu. 
Indeed, with \CoqIDE{} +the simplest way to perform a \texttt{SearchAbout} on some identifier +is to select it using the mouse, and pressing \verb|F2|. This will +both make appear the query window and run the \texttt{SearchAbout} in +it, displaying the result. Shortcuts \verb|F3| and \verb|F4| are for +\verb|Check| and \verb|Print| respectively. +Figure~\ref{fig:querywindow} displays the query window after selection +of the word ``mult'' in the script windows, and pressing \verb|F4| to +print its definition. + +\section{Compilation} + +The \verb|Compile| menu offers direct commands to: +\begin{itemize} +\item compile the current buffer +\item run a compilation using \verb|make| +\item go to the last compilation error +\item create a \verb|makefile| using \verb|coq_makefile|. +\end{itemize} + +\section{Customizations} + +You may customize your environment using menu +\texttt{Edit/Preferences}. A new window will be displayed, with +several customization sections presented as a notebook. + +The first section is for selecting the text font used for scripts, goal +and message windows. + +The second section is devoted to file management: you may +configure automatic saving of files, by periodically saving the +contents into files named \verb|#f#| for each opened file +\verb|f|. You may also activate the \emph{revert} feature: in case a +opened file is modified on the disk by a third party, \CoqIDE{} may read +it again for you. Note that in the case you edited that same file, you +will be prompt to choose to either discard your changes or not. The +\texttt{File charset encoding} choice is described below in +Section~\ref{sec:coqidecharencoding} + + +The \verb|Externals| section allows to customize the external commands +for compilation, printing, web browsing. In the browser command, you +may use \verb|%s| to denote the URL to open, for example: % +\verb|mozilla -remote "OpenURL(%s)"|. 
+ +The \verb|Tactics Wizard| section allows to defined the set of tactics +that should be tried, in sequence, to solve the current goal. + +The last section is for miscellaneous boolean settings, such as the +``contextual menu on goals'' feature presented in +Section~\ref{sec:trytactics}. + +Notice that these settings are saved in the file \verb|.coqiderc| of +your home directory. + +A gtk2 accelerator keymap is saved under the name \verb|.coqide.keys|. +This file should not be edited manually: to modify a given menu +shortcut, go to the corresponding menu item without releasing the +mouse button, press the key you want for the new shortcut, and release +the mouse button afterwards. + +For experts: it is also possible to set up a specific gtk resource +file, under the name \verb|.coqide-gtk2rc|, following the gtk2 +resources syntax +\url{http://developer.gnome.org/doc/API/2.0/gtk/gtk-Resource-Files.html}. +Such a default resource file exists in the \Coq{} library, you may +copy this file into your home directory, and edit it using any text +editor, \CoqIDE{} itself for example. + +\section{Using unicode symbols} + +\CoqIDE{} supports unicode character encoding in its text windows, +consequently a large set of symbols is available for notations. + +\subsection{Displaying unicode symbols} + +You just need to define suitable notations as described in +Chapter~\ref{Addoc-syntax}. For example, to use the mathematical symbols +$\forall$ and $\exists$, you may define +\begin{quote}\tt +Notation "$\forall$ x : t, P" := \\ +\qquad (forall x:t, P) (at level 200, x ident).\\ +Notation "$\exists$ x : t, P" := \\ +\qquad (exists x:t, P) (at level 200, x ident). +\end{quote} +There exists a small set of such notations already defined, in the +file \verb|utf8.v| of \Coq{} library, so you may enable them just by +\verb|Require utf8| inside \CoqIDE{}, or equivalently, by starting +\CoqIDE{} with \verb|coqide -l utf8|. 
+ +However, there are some issues when using such unicode symbols: you of +course need to use a character font which supports them. In the Fonts +section of the preferences, the Preview line displays some unicode symbols, so +you could figure out if the selected font is OK. Related to this, one +thing you may need to do is choose whether Gtk should use antialiased +fonts or not, by setting the environment variable \verb|GDK_USE_XFT| +to 1 or 0 respectively. + +\subsection{Defining an input method for non ASCII symbols} + +To input an Unicode symbol, a general method is to press both the +CONTROL and the SHIFT keys, and type the hexadecimal code of the +symbol required, for example \verb|2200| for the $\forall$ symbol. +A list of symbol codes is available at \url{http://www.unicode.org}. + +Of course, this method is painful for symbols you use often. There is +always the possibility to copy-paste a symbol already typed in. +Another method is to bind some key combinations for frequently used +symbols. For example, to bind keys \verb|F11| and \verb|F12| to +$\forall$ and $\exists$ respectively, you may add +\begin{quote}\tt + bind "F11" {"insert-at-cursor" ("$\forall$")}\\ + bind "F12" {"insert-at-cursor" ("$\exists$")} +\end{quote} +to your \verb|binding "text"| section in \verb|.coqiderc-gtk2rc|. + + +% such a binding is system-dependent. We +% give here a solution for X11: +% \begin{itemize} +% \item first, using \verb|xmodmap|, bind some key combination into a +% new key name such as Fxx where xx greater that 12: for example (on a +% french keyboard) +% \begin{quote}\tt +% xmodmap -e "keycode 24 = a A F13 F13" \\ +% xmodmap -e "keycode 26 = e E F14 F14" +% \end{quote} +% will rebind "a" to F13 and "e" to F14. +% \item then add +% \begin{quote}\tt +% bind "F13" {"insert-at-cursor" ("$\forall$")}\\ +% bind "F14" {"insert-at-cursor" ("$\exists$")} +% \end{quote} +% to your \verb|binding "text"| section in \verb|.coqiderc-gtk2rc|. 
+% The strange \verb|∀| argument is the UTF-8 encoding for +% 0x2200, that is the symbol $\forall$. Computing UTF-8 encoding +% for a unicode can be done in various ways, including +% launching a \verb|lablgtk2| toplevel and use +%\begin{verbatim} +% Glib.Utf8.from_unichar 0x2200;; +%\end{verbatim} +%\end{itemize} + +\subsection{Character encoding for saved files} +\label{sec:coqidecharencoding} + +In the \texttt{Files} section of the preferences, the encoding option +is related to the way files are saved. + +If you have no need to exchange files with non UTF-8 aware +applications, it is better to choose the UTF-8 encoding, since it +guarantees that your files will be read again without problems. (This +is because when \CoqIDE{} reads a file, it tries to automatically +detect its character encoding.) + +If you choose something else than UTF-8, then missing characters will +be written encoded by \verb|\x{....}| or \verb|\x{........}| where +each dot is an hexadecimal digit: the number between braces is the +hexadecimal UNICODE index for the missing character. 
+ + +\section{Building a custom \CoqIDE{} with user \textsc{ML} code} + +You can do this as described in Section~\ref{Coqmktop} for a +custom coq text toplevel, simply by adding +option \verb|-ide| to \verb|coqmktop|, that is something like +\begin{quote} +\texttt{coqmktop -ide -byte $m_1$.cmo \ldots{} $m_n$.cmo} +\end{quote} +or +\begin{quote} +\texttt{coqmktop -ide -opt $m_1$.cmx \ldots{} $m_n$.cmx} +\end{quote} + + + +% $Id: RefMan-ide.tex 8626 2006-03-14 15:01:00Z notin $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-ind.tex b/doc/refman/RefMan-ind.tex new file mode 100644 index 00000000..d414e606 --- /dev/null +++ b/doc/refman/RefMan-ind.tex @@ -0,0 +1,498 @@ + +%\documentstyle[11pt]{article} +%\input{title} + +%\include{macros} +%\makeindex + +%\begin{document} +%\coverpage{The module {\tt Equality}}{Cristina CORNES} + +%\tableofcontents + +\chapter{Tactics for inductive types and families} +\label{Addoc-equality} + +This chapter details a few special tactics useful for inferring facts +from inductive hypotheses. They can be considered as tools that +macro-generate complicated uses of the basic elimination tactics for +inductive types. + +Sections \ref{inversion_introduction} to \ref{inversion_using} present +inversion tactics and section \ref{scheme} describes +a command {\tt Scheme} for automatic generation of induction schemes +for mutual inductive types. + +%\end{document} +%\documentstyle[11pt]{article} +%\input{title} + +%\begin{document} +%\coverpage{Module Inv: Inversion Tactics}{Cristina CORNES} + +\section{Generalities about inversion} +\label{inversion_introduction} +When working with (co)inductive predicates, we are very often faced to +some of these situations: +\begin{itemize} +\item we have an inconsistent instance of an inductive predicate in the + local context of hypotheses. Thus, the current goal can be trivially + proved by absurdity. 
+ +\item we have a hypothesis that is an instance of an inductive + predicate, and the instance has some variables whose constraints we + would like to derive. +\end{itemize} + +The inversion tactics are very useful to simplify the work in these +cases. Inversion tools can be classified in three groups: +\begin{enumerate} +\item tactics for inverting an instance without stocking the inversion + lemma in the context: + (\texttt{Dependent}) \texttt{Inversion} and + (\texttt{Dependent}) \texttt{Inversion\_clear}. +\item commands for generating and stocking in the context the inversion + lemma corresponding to an instance: \texttt{Derive} + (\texttt{Dependent}) \texttt{Inversion}, \texttt{Derive} + (\texttt{Dependent}) \texttt{Inversion\_clear}. +\item tactics for inverting an instance using an already defined + inversion lemma: \texttt{Inversion \ldots using}. +\end{enumerate} + +These tactics work for inductive types of arity $(\vec{x}:\vec{T})s$ +where $s \in \{Prop,Set,Type\}$. Sections \ref{inversion_primitive}, +\ref{inversion_derivation} and \ref{inversion_using} +describe respectively each group of tools. + +As inversion proofs may be large in size, we recommend the user to +stock the lemmas whenever the same instance needs to be inverted +several times.\\ + +Let's consider the relation \texttt{Le} over natural numbers and the +following variables: + +\begin{coq_eval} +Restore State "Initial". +\end{coq_eval} + +\begin{coq_example*} +Inductive Le : nat -> nat -> Set := + | LeO : forall n:nat, Le 0%N n + | LeS : forall n m:nat, Le n m -> Le (S n) (S m). +Variable P : nat -> nat -> Prop. +Variable Q : forall n m:nat, Le n m -> Prop. +\end{coq_example*} + +For example purposes we defined \verb+Le: nat->nat->Set+ + but we may have defined +it \texttt{Le} of type \verb+nat->nat->Prop+ or \verb+nat->nat->Type+. 
+ + +\section{Inverting an instance} +\label{inversion_primitive} +\subsection{The non dependent case} +\begin{itemize} + +\item \texttt{Inversion\_clear} \ident~\\ +\index{Inversion-clear@{\tt Inversion\_clear}} + Let the type of \ident~ in the local context be $(I~\vec{t})$, + where $I$ is a (co)inductive predicate. Then, + \texttt{Inversion} applied to \ident~ derives for each possible + constructor $c_i$ of $(I~\vec{t})$, {\bf all} the necessary + conditions that should hold for the instance $(I~\vec{t})$ to be + proved by $c_i$. Finally it erases \ident~ from the context. + + + +For example, consider the goal: +\begin{coq_eval} +Lemma ex : forall n m:nat, Le (S n) m -> P n m. +intros. +\end{coq_eval} + +\begin{coq_example} +Show. +\end{coq_example} + +To prove the goal we may need to reason by cases on \texttt{H} and to + derive that \texttt{m} is necessarily of +the form $(S~m_0)$ for certain $m_0$ and that $(Le~n~m_0)$. +Deriving these conditions corresponds to prove that the +only possible constructor of \texttt{(Le (S n) m)} is +\texttt{LeS} and that we can invert the +\texttt{->} in the type of \texttt{LeS}. +This inversion is possible because \texttt{Le} is the smallest set closed by +the constructors \texttt{LeO} and \texttt{LeS}. + + +\begin{coq_example} +inversion_clear H. +\end{coq_example} + +Note that \texttt{m} has been substituted in the goal for \texttt{(S m0)} +and that the hypothesis \texttt{(Le n m0)} has been added to the +context. + +\item \texttt{Inversion} \ident~\\ +\index{Inversion@{\tt Inversion}} + This tactic differs from {\tt Inversion\_clear} in the fact that + it adds the equality constraints in the context and + it does not erase the hypothesis \ident. + + +In the previous example, {\tt Inversion\_clear} +has substituted \texttt{m} by \texttt{(S m0)}. Sometimes it is +interesting to have the equality \texttt{m=(S m0)} in the +context to use it after. 
In that case we can use \texttt{Inversion} that +does not clear the equalities: + +\begin{coq_example*} +Undo. +\end{coq_example*} +\begin{coq_example} +inversion H. +\end{coq_example} + +\begin{coq_eval} +Undo. +\end{coq_eval} + +Note that the hypothesis \texttt{(S m0)=m} has been deduced and +\texttt{H} has not been cleared from the context. + +\end{itemize} + +\begin{Variants} + +\item \texttt{Inversion\_clear } \ident~ \texttt{in} \ident$_1$ \ldots + \ident$_n$\\ +\index{Inversion_clear...in@{\tt Inversion\_clear...in}} + Let \ident$_1$ \ldots \ident$_n$, be identifiers in the local context. This + tactic behaves as generalizing \ident$_1$ \ldots \ident$_n$, and then performing + {\tt Inversion\_clear}. + +\item \texttt{Inversion } \ident~ \texttt{in} \ident$_1$ \ldots \ident$_n$\\ +\index{Inversion ... in@{\tt Inversion ... in}} + Let \ident$_1$ \ldots \ident$_n$, be identifiers in the local context. This + tactic behaves as generalizing \ident$_1$ \ldots \ident$_n$, and then performing + \texttt{Inversion}. + + +\item \texttt{Simple Inversion} \ident~ \\ +\index{Simple Inversion@{\tt Simple Inversion}} + It is a very primitive inversion tactic that derives all the necessary + equalities but it does not simplify + the constraints as \texttt{Inversion} and + {\tt Inversion\_clear} do. + +\end{Variants} + + +\subsection{The dependent case} +\begin{itemize} +\item \texttt{Dependent Inversion\_clear} \ident~\\ +\index{Dependent Inversion-clear@{\tt Dependent Inversion\_clear}} + Let the type of \ident~ in the local context be $(I~\vec{t})$, + where $I$ is a (co)inductive predicate, and let the goal depend both on + $\vec{t}$ and \ident. Then, + \texttt{Dependent Inversion\_clear} applied to \ident~ derives + for each possible constructor $c_i$ of $(I~\vec{t})$, {\bf all} the + necessary conditions that should hold for the instance $(I~\vec{t})$ to be + proved by $c_i$. 
It also substitutes \ident~ for the corresponding + term in the goal and it erases \ident~ from the context. + + +For example, consider the goal: +\begin{coq_eval} +Lemma ex_dep : forall (n m:nat) (H:Le (S n) m), Q (S n) m H. +intros. +\end{coq_eval} + +\begin{coq_example} +Show. +\end{coq_example} + +As \texttt{H} occurs in the goal, we may want to reason by cases on its +structure and so, we would like inversion tactics to +substitute \texttt{H} by the corresponding term in constructor form. +Neither \texttt{Inversion} nor {\tt Inversion\_clear} make such a +substitution. To have such a behavior we use the dependent inversion tactics: + +\begin{coq_example} +dependent inversion_clear H. +\end{coq_example} + +Note that \texttt{H} has been substituted by \texttt{(LeS n m0 l)} and +\texttt{m} by \texttt{(S m0)}. + + +\end{itemize} + +\begin{Variants} + +\item \texttt{Dependent Inversion\_clear } \ident~ \texttt{ with } \term\\ +\index{Dependent Inversion_clear...with@{\tt Dependent Inversion\_clear...with}} + \noindent Behaves as \texttt{Dependent Inversion\_clear} but allows to give + explicitly the good generalization of the goal. It is useful when + the system fails to generalize the goal automatically. If + \ident~ has type $(I~\vec{t})$ and $I$ has type + $(\vec{x}:\vec{T})s$, then \term~ must be of type + $I:(\vec{x}:\vec{T})(I~\vec{x})\rightarrow s'$ where $s'$ is the + type of the goal. + + + +\item \texttt{Dependent Inversion} \ident~\\ +\index{Dependent Inversion@{\tt Dependent Inversion}} + This tactic differs from \texttt{Dependent Inversion\_clear} in the fact that + it also adds the equality constraints in the context and + it does not erase the hypothesis \ident~. + +\item \texttt{Dependent Inversion } \ident~ \texttt{ with } \term \\ +\index{Dependent Inversion...with@{\tt Dependent Inversion...with}} + Analogous to \texttt{Dependent Inversion\_clear .. with..} above. 
+\end{Variants} + + + +\section{Deriving the inversion lemmas} +\label{inversion_derivation} +\subsection{The non dependent case} + +The tactics (\texttt{Dependent}) \texttt{Inversion} and (\texttt{Dependent}) +{\tt Inversion\_clear} work on a +certain instance $(I~\vec{t})$ of an inductive predicate. At each +application, they inspect the given instance and derive the +corresponding inversion lemma. If we have to invert the same +instance several times it is recommended to stock the lemma in the +context and to reuse it whenever we need it. + +The families of commands \texttt{Derive Inversion}, \texttt{Derive +Dependent Inversion}, \texttt{Derive} \\ {\tt Inversion\_clear} and \texttt{Derive Dependent Inversion\_clear} +allow to generate inversion lemmas for given instances and sorts. Next +section describes the tactic \texttt{Inversion}$\ldots$\texttt{using} that refines the +goal with a specified inversion lemma. + +\begin{itemize} + +\item \texttt{Derive Inversion\_clear} \ident~ \texttt{with} + $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ +\index{Derive Inversion_clear...with@{\tt Derive Inversion\_clear...with}} + Let $I$ be an inductive predicate and $\vec{x}$ the variables + occurring in $\vec{t}$. This command generates and stocks + the inversion lemma for the sort \sort~ corresponding to the instance + $(\vec{x}:\vec{T})(I~\vec{t})$ with the name \ident~ in the {\bf + global} environment. When applied it is equivalent to have + inverted the instance with the tactic {\tt Inversion\_clear}. + + + For example, to generate the inversion lemma for the instance + \texttt{(Le (S n) m)} and the sort \texttt{Prop} we do: +\begin{coq_example} +Derive Inversion_clear leminv with (forall n m:nat, Le (S n) m) Sort + Prop. +\end{coq_example} + +Let us inspect the type of the generated lemma: +\begin{coq_example} +Check leminv. 
+\end{coq_example} + + + +\end{itemize} + +%\variants +%\begin{enumerate} +%\item \verb+Derive Inversion_clear+ \ident$_1$ \ident$_2$ \\ +%\index{Derive Inversion_clear@{\tt Derive Inversion\_clear}} +% Let \ident$_1$ have type $(I~\vec{t})$ in the local context ($I$ +% an inductive predicate). Then, this command has the same semantics +% as \verb+Derive Inversion_clear+ \ident$_2$~ \verb+with+ +% $(\vec{x}:\vec{T})(I~\vec{t})$ \verb+Sort Prop+ where $\vec{x}$ are the free +% variables of $(I~\vec{t})$ declared in the local context (variables +% of the global context are considered as constants). +%\item \verb+Derive Inversion+ \ident$_1$~ \ident$_2$~\\ +%\index{Derive Inversion@{\tt Derive Inversion}} +% Analogous to the previous command. +%\item \verb+Derive Inversion+ $num$ \ident~ \ident~ \\ +%\index{Derive Inversion@{\tt Derive Inversion}} +% This command behaves as \verb+Derive Inversion+ \ident~ {\it +% namehyp} performed on the goal number $num$. +% +%\item \verb+Derive Inversion_clear+ $num$ \ident~ \ident~ \\ +%\index{Derive Inversion_clear@{\tt Derive Inversion\_clear}} +% This command behaves as \verb+Derive Inversion_clear+ \ident~ +% \ident~ performed on the goal number $num$. +%\end{enumerate} + + + +A derived inversion lemma is adequate for inverting the instance +with which it was generated, \texttt{Derive} applied to +different instances yields different lemmas. In general, if we generate +the inversion lemma with +an instance $(\vec{x}:\vec{T})(I~\vec{t})$ and a sort $s$, the inversion lemma will +expect a predicate of type $(\vec{x}:\vec{T})s$ as first argument. \\ + +\begin{Variant} +\item \texttt{Derive Inversion} \ident~ \texttt{with} + $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort\\ +\index{Derive Inversion...with@{\tt Derive Inversion...with}} + Analogous of \texttt{Derive Inversion\_clear .. with ..} but + when applied it is equivalent to having + inverted the instance with the tactic \texttt{Inversion}. 
+\end{Variant} + +\subsection{The dependent case} +\begin{itemize} +\item \texttt{Derive Dependent Inversion\_clear} \ident~ \texttt{with} + $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ +\index{Derive Dependent Inversion\_clear...with@{\tt Derive Dependent Inversion\_clear...with}} + Let $I$ be an inductive predicate. This command generates and stocks + the dependent inversion lemma for the sort \sort~ corresponding to the instance + $(\vec{x}:\vec{T})(I~\vec{t})$ with the name \ident~ in the {\bf + global} environment. When applied it is equivalent to having + inverted the instance with the tactic \texttt{Dependent Inversion\_clear}. +\end{itemize} + +\begin{coq_example} +Derive Dependent Inversion_clear leminv_dep with + (forall n m:nat, Le (S n) m) Sort Prop. +\end{coq_example} + +\begin{coq_example} +Check leminv_dep. +\end{coq_example} + +\begin{Variants} +\item \texttt{Derive Dependent Inversion} \ident~ \texttt{with} + $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ +\index{Derive Dependent Inversion...with@{\tt Derive Dependent Inversion...with}} + Analogous to \texttt{Derive Dependent Inversion\_clear}, but when + applied it is equivalent to having + inverted the instance with the tactic \texttt{Dependent Inversion}. + +\end{Variants} + +\section{Using already defined inversion lemmas} +\label{inversion_using} +\begin{itemize} +\item \texttt{Inversion} \ident \texttt{ using} \ident$'$ \\ +\index{Inversion...using@{\tt Inversion...using}} + Let \ident~ have type $(I~\vec{t})$ ($I$ an inductive + predicate) in the local context, and \ident$'$ be a (dependent) inversion + lemma. Then, this tactic refines the current goal with the specified + lemma. + + +\begin{coq_eval} +Abort. +\end{coq_eval} + +\begin{coq_example} +Show. +\end{coq_example} +\begin{coq_example} +inversion H using leminv. 
+\end{coq_example} + + +\end{itemize} +\variant +\begin{enumerate} +\item \texttt{Inversion} \ident~ \texttt{using} \ident$'$ \texttt{in} \ident$_1$\ldots \ident$_n$\\ +\index{Inversion...using...in@{\tt Inversion...using...in}} +This tactic behaves as generalizing \ident$_1$\ldots \ident$_n$, +then doing \texttt{Use Inversion} \ident~\ident$'$. +\end{enumerate} + +\section{\tt Scheme ...}\index{Scheme@{\tt Scheme}}\label{Scheme} +\label{scheme} +The {\tt Scheme} command is a high-level tool for generating +automatically (possibly mutual) induction principles for given types +and sorts. Its syntax follows the schema : + +\noindent +{\tt Scheme {\ident$_1$} := Induction for \term$_1$ Sort {\sort$_1$} \\ + with\\ + \mbox{}\hspace{0.1cm} .. \\ + with {\ident$_m$} := Induction for {\term$_m$} Sort + {\sort$_m$}}\\ +\term$_1$ \ldots \term$_m$ are different inductive types belonging to +the same package of mutual inductive definitions. This command +generates {\ident$_1$}\ldots{\ident$_m$} to be mutually recursive +definitions. Each term {\ident$_i$} proves a general principle +of mutual induction for objects in type {\term$_i$}. + +\Example +The definition of principle of mutual induction for {\tt tree} and +{\tt forest} over the sort {\tt Set} is defined by the command: +\begin{coq_eval} +Restore State "Initial". +Variables A B : Set. +Inductive tree : Set := + node : A -> forest -> tree +with forest : Set := + | leaf : B -> forest + | cons : tree -> forest -> forest. +\end{coq_eval} +\begin{coq_example*} +Scheme tree_forest_rec := Induction for tree + Sort Set + with forest_tree_rec := Induction for forest Sort Set. +\end{coq_example*} +You may now look at the type of {\tt tree\_forest\_rec} : +\begin{coq_example} +Check tree_forest_rec. +\end{coq_example} +This principle involves two different predicates for {\tt trees} and +{\tt forests}; it also has three premises each one corresponding to a +constructor of one of the inductive definitions. 
+
+The principle {\tt forest\_tree\_rec} shares exactly the same
+premises, only the conclusion now refers to the property of forests.
+\begin{coq_example}
+Check forest_tree_rec.
+\end{coq_example}
+
+\begin{Variant}
+\item {\tt Scheme {\ident$_1$} := Minimality for \term$_1$ Sort {\sort$_1$} \\
+  with\\
+  \mbox{}\hspace{0.1cm} .. \\
+  with {\ident$_m$} := Minimality for {\term$_m$} Sort
+  {\sort$_m$}}\\
+Same as before but defines a non-dependent elimination principle more
+natural in the case of inductively defined relations.
+\end{Variant}
+
+\Example
+With the predicates {\tt odd} and {\tt even} inductively defined as:
+\begin{coq_eval}
+Restore State "Initial".
+\end{coq_eval}
+\begin{coq_example*}
+Inductive odd : nat -> Prop :=
+    oddS : forall n:nat, even n -> odd (S n)
+with even : nat -> Prop :=
+  | evenO : even 0%N
+  | evenS : forall n:nat, odd n -> even (S n).
+\end{coq_example*}
+The following command generates a powerful elimination
+principle:
+\begin{coq_example*}
+Scheme odd_even := Minimality for odd Sort Prop
+  with even_odd := Minimality for even Sort Prop.
+\end{coq_example*}
+The type of {\tt odd\_even} for instance will be:
+\begin{coq_example}
+Check odd_even.
+\end{coq_example}
+The type of {\tt even\_odd} shares the same premises but the
+conclusion is {\tt (n:nat)(even n)->(Q n)}.
+
+
+
+%\end{document}
+
+% $Id: RefMan-ind.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $
diff --git a/doc/refman/RefMan-int.tex b/doc/refman/RefMan-int.tex
new file mode 100644
index 00000000..258d0ca0
--- /dev/null
+++ b/doc/refman/RefMan-int.tex
@@ -0,0 +1,147 @@
+\setheaders{Introduction}
+\chapter*{Introduction}
+
+This document is the Reference Manual of version \coqversion{} of the \Coq\
+proof assistant. A companion volume, the \Coq\ Tutorial, is provided
+for the beginners. It is advised to read the Tutorial first.
+A new book~\cite{CoqArt} on practical uses of the \Coq{} system will +be published in 2004 and is a good support for both the beginner and +the advanced user. + +%The system \Coq\ is designed to develop mathematical proofs. It can be +%used by mathematicians to develop mathematical theories and by +%computer scientists to write formal specifications, +The \Coq{} system is designed to develop mathematical proofs, and +especially to write formal specifications, programs and to verify that +programs are correct with respect to their specification. It provides +a specification language named \gallina. Terms of \gallina\ can +represent programs as well as properties of these programs and proofs +of these properties. Using the so-called \textit{Curry-Howard + isomorphism}, programs, properties and proofs are formalized in the +same language called \textit{Calculus of Inductive Constructions}, +that is a $\lambda$-calculus with a rich type system. All logical +judgments in \Coq\ are typing judgments. The very heart of the Coq +system is the type-checking algorithm that checks the correctness of +proofs, in other words that checks that a program complies to its +specification. \Coq\ also provides an interactive proof assistant to +build proofs using specific programs called \textit{tactics}. + +All services of the \Coq\ proof assistant are accessible by +interpretation of a command language called \textit{the vernacular}. + +\Coq\ has an interactive mode in which commands are interpreted as the +user types them in from the keyboard and a compiled mode where +commands are processed from a file. + +\begin{itemize} +\item The interactive mode may be used as a debugging mode in which + the user can develop his theories and proofs step by step, + backtracking if needed and so on. The interactive mode is run with + the {\tt coqtop} command from the operating system (which we shall + assume to be some variety of UNIX in the rest of this document). 
+\item The compiled mode acts as a proof checker taking a file + containing a whole development in order to ensure its correctness. + Moreover, \Coq's compiler provides an output file containing a + compact representation of its input. The compiled mode is run with + the {\tt coqc} command from the operating system. + +\end{itemize} +These two modes are documented in chapter \ref{Addoc-coqc}. + +Other modes of interaction with \Coq{} are possible: through an emacs +shell window, an emacs generic user-interface for proof assistant +(ProofGeneral~\cite{ProofGeneral}) or through a customized interface +(PCoq~\cite{Pcoq}). These facilities are not documented here. There +is also a \Coq{} Integrated Development Environment described in +Chapter~\ref{Addoc-coqide}. + +\section*{How to read this book} + +This is a Reference Manual, not a User Manual, then it is not made for a +continuous reading. However, it has some structure that is explained +below. + +\begin{itemize} +\item The first part describes the specification language, + Gallina. Chapters~\ref{Gallina} and~\ref{Gallina-extension} + describe the concrete syntax as well as the meaning of programs, + theorems and proofs in the Calculus of Inductive + Constructions. Chapter~\ref{Theories} describes the standard library + of \Coq. Chapter~\ref{Cic} is a mathematical description of the + formalism. Chapter~\ref{chapter:Modules} describes the module system. + +\item The second part describes the proof engine. It is divided in + five chapters. Chapter~\ref{Vernacular-commands} presents all + commands (we call them \emph{vernacular commands}) that are not + directly related to interactive proving: requests to the + environment, complete or partial evaluation, loading and compiling + files. How to start and stop proofs, do multiple proofs in parallel + is explained in Chapter~\ref{Proof-handling}. 
In + Chapter~\ref{Tactics}, all commands that realize one or more steps + of the proof are presented: we call them \emph{tactics}. The + language to combine these tactics into complex proof strategies is + given in Chapter~\ref{TacticLanguage}. Examples of tactics are + described in Chapter~\ref{Tactics-examples}. + +%\item The third part describes how to extend the system in two ways: +% adding parsing and pretty-printing rules +% (Chapter~\ref{Addoc-syntax}) and writing new tactics +% (Chapter~\ref{TacticLanguage}). + +\item The third part describes how to extend the syntax of \Coq. It +corresponds to the Chapter~\ref{Addoc-syntax}. + +\item In the fourth part more practical tools are documented. First in + Chapter~\ref{Addoc-coqc}, the usage of \texttt{coqc} (batch mode) + and \texttt{coqtop} (interactive mode) with their options is + described. Then, in Chapter~\ref{Utilities}, + various utilities that come with the \Coq\ distribution are + presented. + Finally, Chapter~\ref{Addoc-coqide} describes the \Coq{} integrated + development environment. +\end{itemize} + +At the end of the document, after the global index, the user can find +specific indexes for tactics, vernacular commands, and error +messages. + +\section*{List of additional documentation} + +This manual does not contain all the documentation the user may need +about \Coq{}. Various informations can be found in the following +documents: +\begin{description} + +\item[Tutorial] + A companion volume to this reference manual, the \Coq{} Tutorial, is + aimed at gently introducing new users to developing proofs in \Coq{} + without assuming prior knowledge of type theory. In a second step, the + user can read also the tutorial on recursive types (document {\tt + RecTutorial.ps}). + +\item[Addendum] The fifth part (the Addendum) of the Reference Manual + is distributed as a separate document. 
It contains more + detailed documentation and examples about some specific aspects of the + system that may interest only certain users. It shares the indexes, + the page numbers and + the bibliography with the Reference Manual. If you see in one of the + indexes a page number that is outside the Reference Manual, it refers + to the Addendum. + +\item[Installation] A text file INSTALL that comes with the sources + explains how to install \Coq{}. + +\item[The \Coq{} standard library] +A commented version of sources of the \Coq{} standard library +(including only the specifications, the proofs are removed) +is given in the additional document {\tt Library.ps}. + +\end{description} + + +% $Id: RefMan-int.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-lib.tex b/doc/refman/RefMan-lib.tex new file mode 100644 index 00000000..f9a5f975 --- /dev/null +++ b/doc/refman/RefMan-lib.tex @@ -0,0 +1,1102 @@ +\chapter{The {\Coq} library} +\index{Theories}\label{Theories} + +The \Coq\ library is structured into three parts: + +\begin{description} +\item[The initial library:] it contains + elementary logical notions and datatypes. It constitutes the + basic state of the system directly available when running + \Coq; + +\item[The standard library:] general-purpose libraries containing + various developments of \Coq\ axiomatizations about sets, lists, + sorting, arithmetic, etc. This library comes with the system and its + modules are directly accessible through the \verb!Require! command + (see section~\ref{Require}); + +\item[User contributions:] Other specification and proof developments + coming from the \Coq\ users' community. These libraries are no + longer distributed with the system. They are available by anonymous + FTP (see section~\ref{Contributions}). 
+\end{description} + +This chapter briefly reviews these libraries. + +\section{The basic library} +\label{Prelude} + +This section lists the basic notions and results which are directly +available in the standard \Coq\ system +\footnote{Most of these constructions are defined in the +{\tt Prelude} module in directory {\tt theories/Init} at the {\Coq} +root directory; this includes the modules +{\tt Notations}, +{\tt Logic}, +{\tt Datatypes}, +{\tt Specif}, +{\tt Peano}, +and {\tt Wf}. +Module {\tt Logic\_Type} also makes it in the initial state}. + +\subsection{Notations} \label{Notations} + +This module defines the parsing and pretty-printing of many symbols +(infixes, prefixes, etc.). However, it does not assign a meaning to these +notations. The purpose of this is to define precedence and +associativity of very common notations, and avoid users to use them +with other precedence, which may be confusing. + +\begin{figure} +\begin{center} +\begin{tabular}{|cll|} +\hline +Notation & Precedence & Associativity \\ +\hline +\verb!_ <-> _! & 95 & no \\ +\verb!_ \/ _! & 85 & right \\ +\verb!_ /\ _! & 80 & right \\ +\verb!~ _! & 75 & right \\ +\verb!_ = _! & 70 & no \\ +\verb!_ = _ = _! & 70 & no \\ +\verb!_ = _ :> _! & 70 & no \\ +\verb!_ <> _! & 70 & no \\ +\verb!_ <> _ :> _! & 70 & no \\ +\verb!_ < _! & 70 & no \\ +\verb!_ > _! & 70 & no \\ +\verb!_ <= _! & 70 & no \\ +\verb!_ >= _! & 70 & no \\ +\verb!_ < _ < _! & 70 & no \\ +\verb!_ < _ <= _! & 70 & no \\ +\verb!_ <= _ < _! & 70 & no \\ +\verb!_ <= _ <= _! & 70 & no \\ +\verb!_ + _! & 50 & left \\ +\verb!_ - _! & 50 & left \\ +\verb!_ * _! & 40 & left \\ +\verb!_ / _! & 40 & left \\ +\verb!- _! & 35 & right \\ +\verb!/ _! & 35 & right \\ +\verb!_ ^ _! 
& 30 & right \\ +\hline +\end{tabular} +\end{center} +\caption{Notations in the initial state} +\label{init-notations} +\end{figure} + +\subsection{Logic} +\label{Logic} + +\begin{figure} +\begin{centerframe} +\begin{tabular}{lclr} +{\form} & ::= & {\tt True} & ({\tt True})\\ + & $|$ & {\tt False} & ({\tt False})\\ + & $|$ & {\tt\char'176} {\form} & ({\tt not})\\ + & $|$ & {\form} {\tt /$\backslash$} {\form} & ({\tt and})\\ + & $|$ & {\form} {\tt $\backslash$/} {\form} & ({\tt or})\\ + & $|$ & {\form} {\tt ->} {\form} & (\em{primitive implication})\\ + & $|$ & {\form} {\tt <->} {\form} & ({\tt iff})\\ + & $|$ & {\tt forall} {\ident} {\tt :} {\type} {\tt ,} + {\form} & (\em{primitive for all})\\ + & $|$ & {\tt exists} {\ident} \zeroone{{\tt :} {\specif}} {\tt + ,} {\form} & ({\tt ex})\\ + & $|$ & {\tt exists2} {\ident} \zeroone{{\tt :} {\specif}} {\tt + ,} {\form} {\tt \&} {\form} & ({\tt ex2})\\ + & $|$ & {\term} {\tt =} {\term} & ({\tt eq})\\ + & $|$ & {\term} {\tt =} {\term} {\tt :>} {\specif} & ({\tt eq}) +\end{tabular} +\end{centerframe} +\caption{Syntax of formulas} +\label{formulas-syntax} +\end{figure} + +The basic library of {\Coq} comes with the definitions of standard +(intuitionistic) logical connectives (they are defined as inductive +constructions). They are equipped with an appealing syntax enriching the +(subclass {\form}) of the syntactic class {\term}. The syntax +extension is shown on figure \ref{formulas-syntax}. + +% The basic library of {\Coq} comes with the definitions of standard +% (intuitionistic) logical connectives (they are defined as inductive +% constructions). They are equipped with an appealing syntax enriching +% the (subclass {\form}) of the syntactic class {\term}. The syntax +% extension \footnote{This syntax is defined in module {\tt +% LogicSyntax}} is shown on Figure~\ref{formulas-syntax}. + +\Rem Implication is not defined but primitive (it is a non-dependent +product of a proposition over another proposition). 
There is also a +primitive universal quantification (it is a dependent product over a +proposition). The primitive universal quantification allows both +first-order and higher-order quantification. + +\subsubsection{Propositional Connectives} \label{Connectives} +\index{Connectives} + +First, we find propositional calculus connectives: +\ttindex{True} +\ttindex{I} +\ttindex{False} +\ttindex{not} +\ttindex{and} +\ttindex{conj} +\ttindex{proj1} +\ttindex{proj2} + +\begin{coq_eval} +Set Printing Depth 50. +\end{coq_eval} +\begin{coq_example*} +Inductive True : Prop := I. +Inductive False : Prop := . +Definition not (A: Prop) := A -> False. +Inductive and (A B:Prop) : Prop := conj (_:A) (_:B). +Section Projections. +Variables A B : Prop. +Theorem proj1 : A /\ B -> A. +Theorem proj2 : A /\ B -> B. +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} +\ttindex{or} +\ttindex{or\_introl} +\ttindex{or\_intror} +\ttindex{iff} +\ttindex{IF\_then\_else} +\begin{coq_example*} +End Projections. +Inductive or (A B:Prop) : Prop := + | or_introl (_:A) + | or_intror (_:B). +Definition iff (P Q:Prop) := (P -> Q) /\ (Q -> P). +Definition IF_then_else (P Q R:Prop) := P /\ Q \/ ~ P /\ R. +\end{coq_example*} + +\subsubsection{Quantifiers} \label{Quantifiers} +\index{Quantifiers} + +Then we find first-order quantifiers: +\ttindex{all} +\ttindex{ex} +\ttindex{exists} +\ttindex{ex\_intro} +\ttindex{ex2} +\ttindex{exists2} +\ttindex{ex\_intro2} + +\begin{coq_example*} +Definition all (A:Set) (P:A -> Prop) := forall x:A, P x. +Inductive ex (A: Set) (P:A -> Prop) : Prop := + ex_intro (x:A) (_:P x). +Inductive ex2 (A:Set) (P Q:A -> Prop) : Prop := + ex_intro2 (x:A) (_:P x) (_:Q x). 
+\end{coq_example*} + +The following abbreviations are allowed: +\begin{center} + \begin{tabular}[h]{|l|l|} + \hline + \verb+exists x:A, P+ & \verb+ex A (fun x:A => P)+ \\ + \verb+exists x, P+ & \verb+ex _ (fun x => P)+ \\ + \verb+exists2 x:A, P & Q+ & \verb+ex2 A (fun x:A => P) (fun x:A => Q)+ \\ + \verb+exists2 x, P & Q+ & \verb+ex2 _ (fun x => P) (fun x => Q)+ \\ + \hline + \end{tabular} +\end{center} + +The type annotation \texttt{:A} can be omitted when \texttt{A} can be +synthesized by the system. + +\subsubsection{Equality} \label{Equality} +\index{Equality} + +Then, we find equality, defined as an inductive relation. That is, +given a \verb:Type: \verb:A: and an \verb:x: of type \verb:A:, the +predicate \verb:(eq A x): is the smallest one which contains \verb:x:. +This definition, due to Christine Paulin-Mohring, is equivalent to +define \verb:eq: as the smallest reflexive relation, and it is also +equivalent to Leibniz' equality. + +\ttindex{eq} +\ttindex{refl\_equal} + +\begin{coq_example*} +Inductive eq (A:Type) (x:A) : A -> Prop := + refl_equal : eq A x x. +\end{coq_example*} + +\subsubsection{Lemmas} +\label{PreludeLemmas} + +Finally, a few easy lemmas are provided. + +\ttindex{absurd} + +\begin{coq_example*} +Theorem absurd : forall A C:Prop, A -> ~ A -> C. +\end{coq_example*} +\begin{coq_eval} +Abort. +\end{coq_eval} +\ttindex{sym\_eq} +\ttindex{trans\_eq} +\ttindex{f\_equal} +\ttindex{sym\_not\_eq} +\begin{coq_example*} +Section equality. +Variables A B : Type. +Variable f : A -> B. +Variables x y z : A. +Theorem sym_eq : x = y -> y = x. +Theorem trans_eq : x = y -> y = z -> x = z. +Theorem f_equal : x = y -> f x = f y. +Theorem sym_not_eq : x <> y -> y <> x. +\end{coq_example*} +\begin{coq_eval} +Abort. +Abort. +Abort. +Abort. +\end{coq_eval} +\ttindex{eq\_ind\_r} +\ttindex{eq\_rec\_r} +\ttindex{eq\_rect} +\ttindex{eq\_rect\_r} +%Definition eq_rect: (A:Set)(x:A)(P:A->Type)(P x)->(y:A)(x=y)->(P y). +\begin{coq_example*} +End equality. 
+
+Definition eq_ind_r :
+  forall (A:Type) (x:A) (P:A -> Prop), P x -> forall y:A, y = x -> P y.
+Definition eq_rec_r :
+  forall (A:Type) (x:A) (P:A -> Set), P x -> forall y:A, y = x -> P y.
+Definition eq_rect_r :
+  forall (A:Type) (x:A) (P:A -> Type), P x -> forall y:A, y = x -> P y.
+\end{coq_example*}
+\begin{coq_eval}
+Abort.
+Abort.
+Abort.
+\end{coq_eval}
+%Abort (for now predefined eq_rect)
+\begin{coq_example*}
+Hint Immediate sym_eq sym_not_eq : core.
+\end{coq_example*}
+\ttindex{f\_equal$i$}
+
+The theorem {\tt f\_equal} is extended to functions with two to five
+arguments. The theorems are named {\tt f\_equal2}, {\tt f\_equal3},
+{\tt f\_equal4} and {\tt f\_equal5}.
+For instance {\tt f\_equal3} is defined the following way.
+\begin{coq_example*}
+Theorem f_equal3 :
+ forall (A1 A2 A3 B:Type) (f:A1 -> A2 -> A3 -> B) (x1 y1:A1) (x2 y2:A2)
+   (x3 y3:A3), x1 = y1 -> x2 = y2 -> x3 = y3 -> f x1 x2 x3 = f y1 y2 y3.
+\end{coq_example*}
+\begin{coq_eval}
+Abort.
+\end{coq_eval}
+
+\subsection{Datatypes}
+\label{Datatypes}
+\index{Datatypes}
+
+\begin{figure}
+\begin{centerframe}
+\begin{tabular}{rclr}
+{\specif} & ::= & {\specif} {\tt *} {\specif} & ({\tt prod})\\
+  & $|$ & {\specif} {\tt +} {\specif} & ({\tt sum})\\
+  & $|$ & {\specif} {\tt + \{} {\specif} {\tt \}} & ({\tt sumor})\\
+  & $|$ & {\tt \{} {\specif} {\tt \} + \{} {\specif} {\tt \}} &
+  ({\tt sumbool})\\
+  & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt |} {\form} {\tt \}}
+  & ({\tt sig})\\
+  & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt |} {\form} {\tt \&}
+  {\form} {\tt \}} & ({\tt sig2})\\
+  & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt \&} {\specif} {\tt
+  \}} & ({\tt sigS})\\
+  & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt \&} {\specif} {\tt
+  \&} {\specif} {\tt \}} & ({\tt sigS2})\\
+  & & & \\
+{\term} & ::= & {\tt (} {\term} {\tt ,} {\term} {\tt )} & ({\tt pair})
+\end{tabular}
+\end{centerframe}
+\caption{Syntax of datatypes and specifications}
+\label{specif-syntax}
+\end{figure}
+
+
+
+In the basic library, we find the definition\footnote{They are in {\tt
+    Datatypes.v}} of the basic data-types of programming, again
+defined as inductive constructions over the sort \verb:Set:. Some of
+them come with a special syntax shown on Figure~\ref{specif-syntax}.
+
+\subsubsection{Programming}
+\label{Programming}
+\index{Programming}
+\label{libnats}
+
+\ttindex{unit}
+\ttindex{tt}
+\ttindex{bool}
+\ttindex{true}
+\ttindex{false}
+\ttindex{nat}
+\ttindex{O}
+\ttindex{S}
+\ttindex{option}
+\ttindex{Some}
+\ttindex{None}
+\ttindex{identity}
+\ttindex{refl\_identity}
+
+\begin{coq_example*}
+Inductive unit : Set := tt.
+Inductive bool : Set := true | false.
+Inductive nat : Set := O | S (n:nat).
+Inductive option (A:Set) : Set := Some (_:A) | None.
+Inductive identity (A:Type) (a:A) : A -> Type :=
+    refl_identity : identity A a a.
+\end{coq_example*}
+
+Note that zero is the letter \verb:O:, and {\sl not} the numeral
+\verb:0:.
+
+{\tt identity} is logically equivalent to equality but it lives in
+sort {\tt Set}. Computationally, it behaves like {\tt unit}.
+
+We then define the disjoint sum \verb:A+B: of two sets \verb:A: and
+\verb:B:, and their product \verb:A*B:.
+\ttindex{sum}
+\ttindex{A+B}
+\ttindex{+}
+\ttindex{inl}
+\ttindex{inr}
+\ttindex{prod}
+\ttindex{A*B}
+\ttindex{*}
+\ttindex{pair}
+\ttindex{fst}
+\ttindex{snd}
+
+\begin{coq_example*}
+Inductive sum (A B:Set) : Set := inl (_:A) | inr (_:B).
+Inductive prod (A B:Set) : Set := pair (_:A) (_:B).
+Section projections.
+Variables A B : Set.
+Definition fst (H: prod A B) := match H with
+                                | pair x y => x
+                                end.
+Definition snd (H: prod A B) := match H with
+                                | pair x y => y
+                                end.
+End projections.
+\end{coq_example*}
+
+\subsection{Specification}
+
+The following notions\footnote{They are defined in module {\tt
+Specif.v}} allow one to build new datatypes and specifications.
+They are available with the syntax shown on +Figure~\ref{specif-syntax}\footnote{This syntax can be found in the module +{\tt SpecifSyntax.v}}. + +For instance, given \verb|A:Set| and \verb|P:A->Prop|, the construct +\verb+{x:A | P x}+ (in abstract syntax \verb+(sig A P)+) is a +\verb:Set:. We may build elements of this set as \verb:(exist x p): +whenever we have a witness \verb|x:A| with its justification +\verb|p:P x|. + +From such a \verb:(exist x p): we may in turn extract its witness +\verb|x:A| (using an elimination construct such as \verb:match:) but +{\sl not} its justification, which stays hidden, like in an abstract +data type. In technical terms, one says that \verb:sig: is a ``weak +(dependent) sum''. A variant \verb:sig2: with two predicates is also +provided. + +\index{\{x:A "| (P x)\}} +\index{"|} +\ttindex{sig} +\ttindex{exist} +\ttindex{sig2} +\ttindex{exist2} + +\begin{coq_example*} +Inductive sig (A:Set) (P:A -> Prop) : Set := exist (x:A) (_:P x). +Inductive sig2 (A:Set) (P Q:A -> Prop) : Set := + exist2 (x:A) (_:P x) (_:Q x). +\end{coq_example*} + +A ``strong (dependent) sum'' \verb+{x:A & (P x)}+ may be also defined, +when the predicate \verb:P: is now defined as a \verb:Set: +constructor. + +\ttindex{\{x:A \& (P x)\}} +\ttindex{\&} +\ttindex{sigS} +\ttindex{existS} +\ttindex{projS1} +\ttindex{projS2} +\ttindex{sigS2} +\ttindex{existS2} + +\begin{coq_example*} +Inductive sigS (A:Set) (P:A -> Set) : Set := existS (x:A) (_:P x). +Section sigSprojections. +Variable A : Set. +Variable P : A -> Set. +Definition projS1 (H:sigS A P) := let (x, h) := H in x. +Definition projS2 (H:sigS A P) := + match H return P (projS1 H) with + existS x h => h + end. +End sigSprojections. +Inductive sigS2 (A: Set) (P Q:A -> Set) : Set := + existS2 (x:A) (_:P x) (_:Q x). +\end{coq_example*} + +A related non-dependent construct is the constructive sum +\verb"{A}+{B}" of two propositions \verb:A: and \verb:B:. 
+\label{sumbool} +\ttindex{sumbool} +\ttindex{left} +\ttindex{right} +\ttindex{\{A\}+\{B\}} + +\begin{coq_example*} +Inductive sumbool (A B:Prop) : Set := left (_:A) | right (_:B). +\end{coq_example*} + +This \verb"sumbool" construct may be used as a kind of indexed boolean +data type. An intermediate between \verb"sumbool" and \verb"sum" is +the mixed \verb"sumor" which combines \verb"A:Set" and \verb"B:Prop" +in the \verb"Set" \verb"A+{B}". +\ttindex{sumor} +\ttindex{inleft} +\ttindex{inright} +\ttindex{A+\{B\}} + +\begin{coq_example*} +Inductive sumor (A:Set) (B:Prop) : Set := inleft (_:A) | inright (_:B). +\end{coq_example*} + +We may define variants of the axiom of choice, like in Martin-Löf's +Intuitionistic Type Theory. +\ttindex{Choice} +\ttindex{Choice2} +\ttindex{bool\_choice} + +\begin{coq_example*} +Lemma Choice : + forall (S S':Set) (R:S -> S' -> Prop), + (forall x:S, {y : S' | R x y}) -> + {f : S -> S' | forall z:S, R z (f z)}. +Lemma Choice2 : + forall (S S':Set) (R:S -> S' -> Set), + (forall x:S, {y : S' & R x y}) -> + {f : S -> S' & forall z:S, R z (f z)}. +Lemma bool_choice : + forall (S:Set) (R1 R2:S -> Prop), + (forall x:S, {R1 x} + {R2 x}) -> + {f : S -> bool | + forall x:S, f x = true /\ R1 x \/ f x = false /\ R2 x}. +\end{coq_example*} +\begin{coq_eval} +Abort. +Abort. +Abort. +\end{coq_eval} + +The next constructs builds a sum between a data type \verb|A:Set| and +an exceptional value encoding errors: + +\ttindex{Exc} +\ttindex{value} +\ttindex{error} + +\begin{coq_example*} +Definition Exc := option. +Definition value := Some. +Definition error := None. +\end{coq_example*} + + +This module ends with theorems, +relating the sorts \verb:Set: and +\verb:Prop: in a way which is consistent with the realizability +interpretation. +\ttindex{False\_rec} +\ttindex{eq\_rec} +\ttindex{Except} +\ttindex{absurd\_set} +\ttindex{and\_rec} + +%Lemma False_rec : (P:Set)False->P. +%Lemma False_rect : (P:Type)False->P. 
+\begin{coq_example*} +Definition except := False_rec. +Notation Except := (except _). +Theorem absurd_set : forall (A:Prop) (C:Set), A -> ~ A -> C. +Theorem and_rec : + forall (A B:Prop) (P:Set), (A -> B -> P) -> A /\ B -> P. +\end{coq_example*} +%\begin{coq_eval} +%Abort. +%Abort. +%\end{coq_eval} + +\subsection{Basic Arithmetics} + +The basic library includes a few elementary properties of natural +numbers, together with the definitions of predecessor, addition and +multiplication\footnote{This is in module {\tt Peano.v}}. It also +provides a scope {\tt nat\_scope} gathering standard notations for +common operations (+,*) and a decimal notation for numbers. That is he +can write \texttt{3} for \texttt{(S (S (S O)))}. This also works on +the left hand side of a \texttt{match} expression (see for example +section~\ref{refine-example}). This scope is opened by default. + +%Remove the redefinition of nat +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +The following example is not part of the standard library, but it +shows the usage of the notations: + +\begin{coq_example*} +Fixpoint even (n:nat) : bool := + match n with + | 0 => true + | 1 => false + | S (S n) => even n + end. +\end{coq_example*} + + +\ttindex{eq\_S} +\ttindex{pred} +\ttindex{pred\_Sn} +\ttindex{eq\_add\_S} +\ttindex{not\_eq\_S} +\ttindex{IsSucc} +\ttindex{O\_S} +\ttindex{n\_Sn} +\ttindex{plus} +\ttindex{plus\_n\_O} +\ttindex{plus\_n\_Sm} +\ttindex{mult} +\ttindex{mult\_n\_O} +\ttindex{mult\_n\_Sm} + +\begin{coq_example*} +Theorem eq_S : forall x y:nat, x = y -> S x = S y. +\end{coq_example*} +\begin{coq_eval} +Abort. +\end{coq_eval} +\begin{coq_example*} +Definition pred (n:nat) : nat := + match n with + | 0 => 0 + | S u => u + end. +Theorem pred_Sn : forall m:nat, m = pred (S m). +Theorem eq_add_S : forall n m:nat, S n = S m -> n = m. +Hint Immediate eq_add_S : core. +Theorem not_eq_S : forall n m:nat, n <> m -> S n <> S m. +\end{coq_example*} +\begin{coq_eval} +Abort All. 
+\end{coq_eval} +\begin{coq_example*} +Definition IsSucc (n:nat) : Prop := + match n with + | 0 => False + | S p => True + end. +Theorem O_S : forall n:nat, 0 <> S n. +Theorem n_Sn : forall n:nat, n <> S n. +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} +\begin{coq_example*} +Fixpoint plus (n m:nat) {struct n} : nat := + match n with + | 0 => m + | S p => S (plus p m) + end. +Lemma plus_n_O : forall n:nat, n = plus n 0. +Lemma plus_n_Sm : forall n m:nat, S (plus n m) = plus n (S m). +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} +\begin{coq_example*} +Fixpoint mult (n m:nat) {struct n} : nat := + match n with + | 0 => 0 + | S p => m + mult p m + end. +Lemma mult_n_O : forall n:nat, 0 = mult n 0. +Lemma mult_n_Sm : forall n m:nat, plus (mult n m) n = mult n (S m). +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} + +Finally, it gives the definition of the usual orderings \verb:le:, +\verb:lt:, \verb:ge:, and \verb:gt:. +\ttindex{le} +\ttindex{le\_n} +\ttindex{le\_S} +\ttindex{lt} +\ttindex{ge} +\ttindex{gt} + +\begin{coq_example*} +Inductive le (n:nat) : nat -> Prop := + | le_n : le n n + | le_S : forall m:nat, le n m -> le n (S m). +Infix "+" := plus : nat_scope. +Definition lt (n m:nat) := S n <= m. +Definition ge (n m:nat) := m <= n. +Definition gt (n m:nat) := m < n. +\end{coq_example*} + +Properties of these relations are not initially known, but may be +required by the user from modules \verb:Le: and \verb:Lt:. Finally, +\verb:Peano: gives some lemmas allowing pattern-matching, and a double +induction principle. + +\ttindex{nat\_case} +\ttindex{nat\_double\_ind} + +\begin{coq_example*} +Theorem nat_case : + forall (n:nat) (P:nat -> Prop), P 0 -> (forall m:nat, P (S m)) -> P n. +\end{coq_example*} +\begin{coq_eval} +Abort All. 
+\end{coq_eval} +\begin{coq_example*} +Theorem nat_double_ind : + forall R:nat -> nat -> Prop, + (forall n:nat, R 0 n) -> + (forall n:nat, R (S n) 0) -> + (forall n m:nat, R n m -> R (S n) (S m)) -> forall n m:nat, R n m. +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} + +\subsection{Well-founded recursion} + +The basic library contains the basics of well-founded recursion and +well-founded induction\footnote{This is defined in module {\tt Wf.v}}. +\index{Well foundedness} +\index{Recursion} +\index{Well founded induction} +\ttindex{Acc} +\ttindex{Acc\_inv} +\ttindex{Acc\_rec} +\ttindex{well\_founded} + +\begin{coq_example*} +Section Well_founded. +Variable A : Set. +Variable R : A -> A -> Prop. +Inductive Acc : A -> Prop := + Acc_intro : forall x:A, (forall y:A, R y x -> Acc y) -> Acc x. +Lemma Acc_inv : forall x:A, Acc x -> forall y:A, R y x -> Acc y. +\end{coq_example*} +\begin{coq_eval} +simple destruct 1; trivial. +Defined. +\end{coq_eval} +\begin{coq_example*} +Section AccRec. +Variable P : A -> Set. +Variable F : + forall x:A, + (forall y:A, R y x -> Acc y) -> (forall y:A, R y x -> P y) -> P x. +Fixpoint Acc_rec (x:A) (a:Acc x) {struct a} : P x := + F x (Acc_inv x a) + (fun (y:A) (h:R y x) => Acc_rec y (Acc_inv x a y h)). +End AccRec. +Definition well_founded := forall a:A, Acc a. +Hypothesis Rwf : well_founded. +Theorem well_founded_induction : + forall P:A -> Set, + (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. +Theorem well_founded_ind : + forall P:A -> Prop, + (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} +{\tt Acc\_rec} can be used to define functions by fixpoints using +well-founded relations to justify termination. Assuming +extensionality of the functional used for the recursive call, the +fixpoint equation can be proved. 
+\ttindex{Fix\_F} +\ttindex{fix\_eq} +\ttindex{Fix\_F\_inv} +\ttindex{Fix\_F\_eq} +\begin{coq_example*} +Section FixPoint. +Variable P : A -> Set. +Variable F : forall x:A, (forall y:A, R y x -> P y) -> P x. +Fixpoint Fix_F (x:A) (r:Acc x) {struct r} : P x := + F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)). +Definition Fix (x:A) := Fix_F x (Rwf x). +Hypothesis F_ext : + forall (x:A) (f g:forall y:A, R y x -> P y), + (forall (y:A) (p:R y x), f y p = g y p) -> F x f = F x g. +Lemma Fix_F_eq : + forall (x:A) (r:Acc x), + F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)) = Fix_F x r. +Lemma Fix_F_inv : forall (x:A) (r s:Acc x), Fix_F x r = Fix_F x s. +Lemma fix_eq : forall x:A, Fix x = F x (fun (y:A) (p:R y x) => Fix y). +\end{coq_example*} +\begin{coq_eval} +Abort All. +\end{coq_eval} +\begin{coq_example*} +End FixPoint. +End Well_founded. +\end{coq_example*} + +\subsection{Accessing the {\Type} level} + +The basic library includes the definitions\footnote{This is in module +{\tt Logic\_Type.v}} of the counterparts of some datatypes and logical +quantifiers at the \verb:Type: level: negation, pair, and properties +of {\tt identity}. + +\ttindex{notT} +\ttindex{prodT} +\ttindex{pairT} +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{coq_example*} +Definition notT (A:Type) := A -> False. +Inductive prodT (A B:Type) : Type := pairT (_:A) (_:B). +\end{coq_example*} + + +At the end, it defines datatypes at the {\Type} level. 
+ +\section{The standard library} + +\subsection{Survey} + +The rest of the standard library is structured into the following +subdirectories: + +\begin{tabular}{lp{12cm}} + {\bf Logic} & Classical logic and dependent equality \\ + {\bf Arith} & Basic Peano arithmetic \\ + {\bf ZArith} & Basic integer arithmetic \\ + {\bf Bool} & Booleans (basic functions and results) \\ + {\bf Lists} & Monomorphic and polymorphic lists (basic functions and + results), Streams (infinite sequences defined with co-inductive + types) \\ + {\bf Sets} & Sets (classical, constructive, finite, infinite, power set, + etc.) \\ + {\bf IntMap} & Representation of finite sets by an efficient + structure of map (trees indexed by binary integers).\\ + {\bf Reals} & Axiomatization of Real Numbers (classical, basic functions, + integer part, fractional part, limit, derivative, Cauchy + series, power series and results,... Requires the + \textbf{ZArith} library).\\ + {\bf Relations} & Relations (definitions and basic results). \\ + {\bf Sorting} & Sorted list (basic definitions and heapsort correctness). \\ + {\bf Wellfounded} & Well-founded relations (basic results). \\ + +\end{tabular} +\medskip + +These directories belong to the initial load path of the system, and +the modules they provide are compiled at installation time. So they +are directly accessible with the command \verb!Require! (see +chapter~\ref{Other-commands}). + +The different modules of the \Coq\ standard library are described in the +additional document \verb!Library.dvi!. They are also accessible on the WWW +through the \Coq\ homepage +\footnote{\texttt{http://coq.inria.fr}}. + +\subsection{Notations for integer arithmetics} +\index{Arithmetical notations} + +On figure \ref{zarith-syntax} is described the syntax of expressions +for integer arithmetics. It is provided by requiring and opening the +module {\tt ZArith} and opening scope {\tt Z\_scope}. 
+ +\ttindex{+} +\ttindex{*} +\ttindex{-} +\ttindex{/} +\ttindex{<=} +\ttindex{>=} +\ttindex{<} +\ttindex{>} +\ttindex{?=} +\ttindex{mod} + +\begin{figure} +\begin{center} +\begin{tabular}{l|l|l|l} +Notation & Interpretation & Precedence & Associativity\\ +\hline +\verb!_ < _! & {\tt Zlt} &&\\ +\verb!x <= y! & {\tt Zle} &&\\ +\verb!_ > _! & {\tt Zgt} &&\\ +\verb!x >= y! & {\tt Zge} &&\\ +\verb!x < y < z! & {\tt x < y \verb!/\! y < z} &&\\ +\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} &&\\ +\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} &&\\ +\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} &&\\ +\verb!_ ?= _! & {\tt Zcompare} & 70 & no\\ +\verb!_ + _! & {\tt Zplus} &&\\ +\verb!_ - _! & {\tt Zminus} &&\\ +\verb!_ * _! & {\tt Zmult} &&\\ +\verb!_ / _! & {\tt Zdiv} &&\\ +\verb!_ mod _! & {\tt Zmod} & 40 & no \\ +\verb!- _! & {\tt Zopp} &&\\ +\verb!_ ^ _! & {\tt Zpower} &&\\ +\end{tabular} +\end{center} +\label{zarith-syntax} +\caption{Definition of the scope for integer arithmetics ({\tt Z\_scope})} +\end{figure} + +Figure~\ref{zarith-syntax} shows the notations provided by {\tt +Z\_scope}. It specifies how notations are interpreted and, when not +already reserved, the precedence and associativity. + +\begin{coq_example} +Require Import ZArith. +Check (2 + 3)%Z. +Open Scope Z_scope. +Check 2 + 3. +\end{coq_example} + +\subsection{Peano's arithmetic (\texttt{nat})} +\index{Peano's arithmetic} +\ttindex{nat\_scope} + +While in the initial state, many operations and predicates of Peano's +arithmetic are defined, further operations and results belong to other +modules. For instance, the decidability of the basic predicates are +defined here. This is provided by requiring the module {\tt Arith}. + +Figure~\ref{nat-syntax} describes notation available in scope {\tt +nat\_scope}. + +\begin{figure} +\begin{center} +\begin{tabular}{l|l} +Notation & Interpretation \\ +\hline +\verb!_ < _! & {\tt lt} \\ +\verb!x <= y! & {\tt le} \\ +\verb!_ > _! 
& {\tt gt} \\ +\verb!x >= y! & {\tt ge} \\ +\verb!x < y < z! & {\tt x < y \verb!/\! y < z} \\ +\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} \\ +\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} \\ +\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} \\ +\verb!_ + _! & {\tt plus} \\ +\verb!_ - _! & {\tt minus} \\ +\verb!_ * _! & {\tt mult} \\ +\end{tabular} +\end{center} +\label{nat-syntax} +\caption{Definition of the scope for natural numbers ({\tt nat\_scope})} +\end{figure} + +\subsection{Real numbers library} + +\subsubsection{Notations for real numbers} +\index{Notations for real numbers} + +This is provided by requiring and opening the module {\tt Reals} and +opening scope {\tt R\_scope}. This set of notations is very similar to +the notation for integer arithmetics. The inverse function was added. +\begin{figure} +\begin{center} +\begin{tabular}{l|l} +Notation & Interpretation \\ +\hline +\verb!_ < _! & {\tt Rlt} \\ +\verb!x <= y! & {\tt Rle} \\ +\verb!_ > _! & {\tt Rgt} \\ +\verb!x >= y! & {\tt Rge} \\ +\verb!x < y < z! & {\tt x < y \verb!/\! y < z} \\ +\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} \\ +\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} \\ +\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} \\ +\verb!_ + _! & {\tt Rplus} \\ +\verb!_ - _! & {\tt Rminus} \\ +\verb!_ * _! & {\tt Rmult} \\ +\verb!_ / _! & {\tt Rdiv} \\ +\verb!- _! & {\tt Ropp} \\ +\verb!/ _! & {\tt Rinv} \\ +\verb!_ ^ _! & {\tt pow} \\ +\end{tabular} +\end{center} +\label{reals-syntax} +\caption{Definition of the scope for real arithmetics ({\tt R\_scope})} +\end{figure} + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{coq_example} +Require Import Reals. +Check (2 + 3)%R. +Open Scope R_scope. +Check 2 + 3. 
+\end{coq_example} + +\subsubsection{Some tactics} + +In addition to the \verb|ring|, \verb|field| and \verb|fourier| +tactics (see Chapter~\ref{Tactics}) there are: +\begin{itemize} +\item {\tt discrR} \tacindex{discrR} + + Proves that a real integer constant $c_1$ is different from another + real integer constant $c_2$. + +\begin{coq_example*} +Require Import DiscrR. +Goal 5 <> 0. +\end{coq_example*} + +\begin{coq_example} +discrR. +\end{coq_example} + +\begin{coq_eval} +Abort. +\end{coq_eval} + +\item {\tt split\_Rabs} allows to unfold {\tt Rabs} constant and splits +corresponding conjonctions. +\tacindex{split\_Rabs} + +\begin{coq_example*} +Require Import SplitAbsolu. +Goal forall x:R, x <= Rabs x. +\end{coq_example*} + +\begin{coq_example} +intro; split_Rabs. +\end{coq_example} + +\begin{coq_eval} +Abort. +\end{coq_eval} + +\item {\tt split\_Rmult} allows to split a condition that a product is + non null into subgoals corresponding to the condition on each + operand of the product. +\tacindex{split\_Rmult} + +\begin{coq_example*} +Require Import SplitRmult. +Goal forall x y z:R, x * y * z <> 0. +\end{coq_example*} + +\begin{coq_example} +intros; split_Rmult. +\end{coq_example} + +\end{itemize} + +All this tactics has been written with the tactic language Ltac +described in Chapter~\ref{TacticLanguage}. More details are available +in document \url{http://coq.inria.fr/~desmettr/Reals.ps}. + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\subsection{List library} +\index{Notations for lists} +\ttindex{length} +\ttindex{head} +\ttindex{tail} +\ttindex{app} +\ttindex{rev} +\ttindex{nth} +\ttindex{map} +\ttindex{flat\_map} +\ttindex{fold\_left} +\ttindex{fold\_right} + +Some elementary operations on polymorphic lists are defined here. They +can be accessed by requiring module {\tt List}. 
+ +It defines the following notions: +\begin{center} +\begin{tabular}{l|l} +\hline +{\tt length} & length \\ +{\tt head} & first element (with default) \\ +{\tt tail} & all but first element \\ +{\tt app} & concatenation \\ +{\tt rev} & reverse \\ +{\tt nth} & accessing $n$-th element (with default) \\ +{\tt map} & applying a function \\ +{\tt flat\_map} & applying a function returning lists \\ +{\tt fold\_left} & iterator (from head to tail) \\ +{\tt fold\_right} & iterator (from tail to head) \\ +\hline +\end{tabular} +\end{center} + +Table show notations available when opening scope {\tt list\_scope}. + +\begin{figure} +\begin{center} +\begin{tabular}{l|l|l|l} +Notation & Interpretation & Precedence & Associativity\\ +\hline +\verb!_ ++ _! & {\tt app} & 60 & right \\ +\verb!_ :: _! & {\tt cons} & 60 & right \\ +\end{tabular} +\end{center} +\label{list-syntax} +\caption{Definition of the scope for lists ({\tt list\_scope})} +\end{figure} + + +\section{Users' contributions} +\index{Contributions} +\label{Contributions} + +Numerous users' contributions have been collected and are available at +URL \url{coq.inria.fr/contribs/}. On this web page, you have a list +of all contributions with informations (author, institution, quick +description, etc.) and the possibility to download them one by one. +There is a small search engine to look for keywords in all +contributions. You will also find informations on how to submit a new +contribution. 
+ +The users' contributions may also be obtained by anonymous FTP from site +\verb:ftp.inria.fr:, in directory \verb:INRIA/coq/: and +searchable on-line at \url{http://coq.inria.fr/contribs-eng.html} + +% $Id: RefMan-lib.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-ltac.tex b/doc/refman/RefMan-ltac.tex new file mode 100644 index 00000000..eced8099 --- /dev/null +++ b/doc/refman/RefMan-ltac.tex @@ -0,0 +1,1057 @@ +\chapter{The tactic language} +\label{TacticLanguage} + +%\geometry{a4paper,body={5in,8in}} + +This chapter gives a compact documentation of Ltac, the tactic +language available in {\Coq}. We start by giving the syntax, and next, +we present the informal semantics. If you want to know more regarding +this language and especially about its fundations, you can refer +to~\cite{Del00}. Chapter~\ref{Tactics-examples} is devoted to giving +examples of use of this language on small but also with non-trivial +problems. + + +\section{Syntax} + +\def\tacexpr{\textrm{\textsl{expr}}} +\def\tacexprlow{\textrm{\textsl{tacexpr$_1$}}} +\def\tacexprinf{\textrm{\textsl{tacexpr$_2$}}} +\def\tacexprpref{\textrm{\textsl{tacexpr$_3$}}} +\def\atom{\textrm{\textsl{atom}}} +\def\recclause{\textrm{\textsl{rec\_clause}}} +\def\letclause{\textrm{\textsl{let\_clause}}} +\def\matchrule{\textrm{\textsl{match\_rule}}} +\def\contextrule{\textrm{\textsl{context\_rule}}} +\def\contexthyps{\textrm{\textsl{context\_hyps}}} +\def\tacarg{\nterm{tacarg}} +\def\cpattern{\nterm{cpattern}} + +The syntax of the tactic language is given Figures~\ref{ltac} +and~\ref{ltac_aux}. See page~\pageref{BNF-syntax} for a description of +the BNF metasyntax used in these grammar rules. 
Various already defined
entries will be used in this chapter: entries {\naturalnumber},
{\integer}, {\ident}, {\qualid}, {\term}, {\cpattern} and {\atomictac}
represent respectively the natural and integer numbers, the authorized
identifiers and qualified names, {\Coq}'s terms and patterns and
all the atomic tactics described in chapter~\ref{Tactics}.
+ +For instance +\begin{quote} +{\tt try repeat \tac$_1$ || + \tac$_2$;\tac$_3$;[\tac$_{31}$|\dots|\tac$_{3n}$];\tac$_4$.} +\end{quote} +is understood as +\begin{quote} +{\tt (try (repeat (\tac$_1$ || \tac$_2$)));} \\ +{\tt ((\tac$_3$;[\tac$_{31}$|\dots|\tac$_{3n}$]);\tac$_4$).} +\end{quote} +\end{Remarks} + + +\begin{figure}[htbp] +\begin{centerframe} +\begin{tabular}{lcl} +{\tacexpr} & ::= & + {\tacexpr} {\tt ;} {\tacexpr}\\ +& | & {\tacexpr} {\tt ; [} \nelist{\tacexpr}{|} {\tt ]}\\ +& | & {\tacexprpref}\\ +\\ +{\tacexprpref} & ::= & + {\tt do} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} {\tacexprpref}\\ +& | & {\tt info} {\tacexprpref}\\ +& | & {\tt progress} {\tacexprpref}\\ +& | & {\tt repeat} {\tacexprpref}\\ +& | & {\tt try} {\tacexprpref}\\ +& | & {\tacexprinf} \\ +\\ +{\tacexprinf} & ::= & + {\tacexprlow} {\tt ||} {\tacexprpref}\\ +& | & {\tacexprlow}\\ +\\ +{\tacexprlow} & ::= & +{\tt fun} \nelist{\name}{} {\tt =>} {\atom}\\ +& | & +{\tt let} \nelist{\letclause}{\tt with} {\tt in} +{\atom}\\ +& | & +{\tt let rec} \nelist{\recclause}{\tt with} {\tt in} +{\tacexpr}\\ +& | & +{\tt match goal with} \nelist{\contextrule}{\tt |} {\tt end}\\ +& | & +{\tt match reverse goal with} \nelist{\contextrule}{\tt |} {\tt end}\\ +& | & +{\tt match} {\tacexpr} {\tt with} \nelist{\matchrule}{\tt |} {\tt end}\\ +& | & {\tt abstract} {\atom}\\ +& | & {\tt abstract} {\atom} {\tt using} {\ident} \\ +& | & {\tt first [} \nelist{\tacexpr}{\tt |} {\tt ]}\\ +& | & {\tt solve [} \nelist{\tacexpr}{\tt |} {\tt ]}\\ +& | & {\tt idtac} ~|~ {\tt idtac} {\qstring}\\ +& | & {\tt fail} ~|~ {\tt fail} {\naturalnumber} {\qstring}\\ +& | & {\tt fresh} ~|~ {\tt fresh} {\qstring}\\ +& | & {\tt context} {\ident} {\tt [} {\term} {\tt ]}\\ +& | & {\tt eval} {\nterm{redexpr}} {\tt in} {\term}\\ +& | & {\tt type of} {\term}\\ +& | & {\tt constr :} {\term}\\ +& | & \atomictac\\ +& | & {\qualid} \nelist{\tacarg}{}\\ +& | & {\atom}\\ +\\ +{\atom} & ::= & + {\qualid} \\ +& | & ()\\ +& | & {\tt (} 
{\tacexpr} {\tt )}\\ +\end{tabular} +\end{centerframe} +\caption{Syntax of the tactic language} +\label{ltac} +\end{figure} + + + +\begin{figure}[htbp] +\begin{centerframe} +\begin{tabular}{lcl} +\tacarg & ::= & + {\qualid}\\ +& $|$ & {\tt ()} \\ +& $|$ & {\tt ltac :} {\atom}\\ +& $|$ & {\term}\\ +\\ +\letclause & ::= & {\ident} \sequence{\name}{} {\tt :=} {\tacexpr}\\ +\\ +\recclause & ::= & {\ident} \nelist{\name}{} {\tt :=} {\tacexpr}\\ +\\ +\contextrule & ::= & + \nelist{\contexthyps}{\tt ,} {\tt |-}{\cpattern} {\tt =>} {\tacexpr}\\ +& $|$ & {\tt |-} {\cpattern} {\tt =>} {\tacexpr}\\ +& $|$ & {\tt \_ =>} {\tacexpr}\\ +\\ +\contexthyps & ::= & {\name} {\tt :} {\cpattern}\\ +\\ +\matchrule & ::= & + {\cpattern} {\tt =>} {\tacexpr}\\ +& $|$ & {\tt context} {\zeroone{\ident}} {\tt [} {\cpattern} {\tt ]} {\tt =>} {\tacexpr}\\ +& $|$ & {\tt \_ =>} {\tacexpr}\\ +\end{tabular} +\end{centerframe} +\caption{Syntax of the tactic language (continued)} +\label{ltac_aux} +\end{figure} + +\begin{figure}[ht] +\begin{centerframe} +\begin{tabular}{lcl} +\nterm{top} & ::= & {\tt Ltac} \nelist{\nterm{ltac\_def}} {\tt with} \\ +\\ +\nterm{ltac\_def} & ::= & {\ident} \sequence{\ident}{} {\tt :=} {\tacexpr} +\end{tabular} +\end{centerframe} +\caption{Tactic toplevel definitions} +\label{ltactop} +\end{figure} + + +%% +%% Semantics +%% +\section{Semantics} +%\index[tactic]{Tacticals} +\index{Tacticals} +%\label{Tacticals} + +Tactic expressions can only be applied in the context of a goal. The +evaluation yields either a term, an integer or a tactic. Intermediary +results can be terms or integers but the final result must be a tactic +which is then applied to the current goal. + +There is a special case for {\tt match goal} expressions of which +the clauses evaluate to tactics. Such expressions can only be used as +end result of a tactic expression (never as argument of a local +definition or of an application). 
+ +The rest of this section explains the semantics of every construction +of Ltac. + + +%% \subsection{Values} + +%% Values are given by Figure~\ref{ltacval}. All these values are tactic values, +%% i.e. to be applied to a goal, except {\tt Fun}, {\tt Rec} and $arg$ values. + +%% \begin{figure}[ht] +%% \noindent{}\framebox[6in][l] +%% {\parbox{6in} +%% {\begin{center} +%% \begin{tabular}{lp{0.1in}l} +%% $vexpr$ & ::= & $vexpr$ {\tt ;} $vexpr$\\ +%% & | & $vexpr$ {\tt ; [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt +%% ]}\\ +%% & | & $vatom$\\ +%% \\ +%% $vatom$ & ::= & {\tt Fun} \nelist{\inputfun}{} {\tt ->} {\tacexpr}\\ +%% %& | & {\tt Rec} \recclause\\ +%% & | & +%% {\tt Rec} \nelist{\recclause}{\tt And} {\tt In} +%% {\tacexpr}\\ +%% & | & +%% {\tt Match Context With} {\it (}$context\_rule$ {\tt |}{\it )}$^*$ +%% $context\_rule$\\ +%% & | & {\tt (} $vexpr$ {\tt )}\\ +%% & | & $vatom$ {\tt Orelse} $vatom$\\ +%% & | & {\tt Do} {\it (}{\naturalnumber} {\it |} {\ident}{\it )} $vatom$\\ +%% & | & {\tt Repeat} $vatom$\\ +%% & | & {\tt Try} $vatom$\\ +%% & | & {\tt First [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt ]}\\ +%% & | & {\tt Solve [} {\it (}$vexpr$ {\tt |}{\it )}$^*$ $vexpr$ {\tt ]}\\ +%% & | & {\tt Idtac}\\ +%% & | & {\tt Fail}\\ +%% & | & {\primitivetactic}\\ +%% & | & $arg$ +%% \end{tabular} +%% \end{center}}} +%% \caption{Values of ${\cal L}_{tac}$} +%% \label{ltacval} +%% \end{figure} + +%% \subsection{Evaluation} + +\subsubsection{Sequence} +\tacindex{;} +\index{Tacticals!;@{\tt {\tac$_1$};\tac$_2$}} + +A sequence is an expression of the following form: +\begin{quote} +{\tacexpr}$_1$ {\tt ;} {\tacexpr}$_2$ +\end{quote} +{\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated to $v_1$ and +$v_2$. $v_1$ and $v_2$ must be tactic values. $v_1$ is then applied +and $v_2$ is applied to every subgoal generated by the application of +$v_1$. Sequence is left associating. 
+ +\subsubsection{General sequence} +\tacindex{;[\ldots$\mid$\ldots$\mid$\ldots]} +%\tacindex{; [ | ]} +%\index{; [ | ]@{\tt ;[\ldots$\mid$\ldots$\mid$\ldots]}} +\index{Tacticals!; [ | ]@{\tt {\tac$_0$};[{\tac$_1$}$\mid$\ldots$\mid$\tac$_n$]}} + +We can generalize the previous sequence operator as +\begin{quote} +{\tacexpr}$_0$ {\tt ; [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} +{\tacexpr}$_n$ {\tt ]} +\end{quote} +{\tacexpr}$_i$ is evaluated to $v_i$, for $i=0,...,n$. $v_0$ is +applied and $v_i$ is applied to the $i$-th generated subgoal by the +application of $v_0$, for $=1,...,n$. It fails if the application of +$v_0$ does not generate exactly $n$ subgoals. + +\subsubsection{For loop} +\tacindex{do} +\index{Tacticals!do@{\tt do}} + +There is a for loop that repeats a tactic {\num} times: +\begin{quote} +{\tt do} {\num} {\tacexpr} +\end{quote} +{\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is +applied {\num} times. Supposing ${\num}>1$, after the first +application of $v$, $v$ is applied, at least once, to the generated +subgoals and so on. It fails if the application of $v$ fails before +the {\num} applications have been completed. + +\subsubsection{Repeat loop} +\tacindex{repeat} +\index{Tacticals!repeat@{\tt repeat}} + +We have a repeat loop with: +\begin{quote} +{\tt repeat} {\tacexpr} +\end{quote} +{\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is +applied until it fails. Supposing $n>1$, after the first application +of $v$, $v$ is applied, at least once, to the generated subgoals and +so on. It stops when it fails for all the generated subgoals. It never +fails. + +\subsubsection{Error catching} +\tacindex{try} +\index{Tacticals!try@{\tt try}} + +We can catch the tactic errors with: +\begin{quote} +{\tt try} {\tacexpr} +\end{quote} +{\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is +applied. If the application of $v$ fails, it catches the error and +leaves the goal unchanged. 
If the level of the exception is positive, +then the exception is re-raised with its level decremented. + +\subsubsection{Detecting progress} +\tacindex{progress} + +We can check if a tactic made progress with: +\begin{quote} +{\tt progress} {\tacexpr} +\end{quote} +{\tacexpr} is evaluated to $v$. $v$ must be a tactic value. $v$ is +applied. If the application of $v$ produced one subgoal equal to the +initial goal (up to syntactical equality), then an error of level 0 is +raised. + +\ErrMsg \errindex{Failed to progress} + +\subsubsection{Branching} +\tacindex{$\mid\mid$} +\index{Tacticals!orelse@{\tt $\mid\mid$}} + +We can easily branch with the following structure: +\begin{quote} +{\tacexpr}$_1$ {\tt ||} {\tacexpr}$_2$ +\end{quote} +{\tacexpr}$_1$ and {\tacexpr}$_2$ are evaluated to $v_1$ and +$v_2$. $v_1$ and $v_2$ must be tactic values. $v_1$ is applied and if +it fails then $v_2$ is applied. Branching is left associating. + +\subsubsection{First tactic to work} +\tacindex{first} +\index{Tacticals!first@{\tt first}} + +We may consider the first tactic to work (i.e. which does not fail) among a +panel of tactics: +\begin{quote} +{\tt first [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} +\end{quote} +{\tacexpr}$_i$ are evaluated to $v_i$ and $v_i$ must be tactic values, for +$i=1,...,n$. Supposing $n>1$, it applies $v_1$, if it works, it stops else it +tries to apply $v_2$ and so on. It fails when there is no applicable tactic. + +\ErrMsg \errindex{No applicable tactic} + +\subsubsection{Solving} +\tacindex{solve} +\index{Tacticals!solve@{\tt solve}} + +We may consider the first to solve (i.e. which generates no subgoal) among a +panel of tactics: +\begin{quote} +{\tt solve [} {\tacexpr}$_1$ {\tt |} $...$ {\tt |} {\tacexpr}$_n$ {\tt ]} +\end{quote} +{\tacexpr}$_i$ are evaluated to $v_i$ and $v_i$ must be tactic values, for +$i=1,...,n$. Supposing $n>1$, it applies $v_1$, if it solves, it stops else it +tries to apply $v_2$ and so on. 
It fails if there is no solving tactic. + +\ErrMsg \errindex{Cannot solve the goal} + +\subsubsection{Identity} +\tacindex{idtac} +\index{Tacticals!idtac@{\tt idtac}} + +The constant {\tt idtac} is the identity tactic: it leaves any goal +unchanged but it appears in the proof script. +\begin{quote} +{\tt idtac} and {\tt idtac "message"} +\end{quote} +The latter variant prints the string on the standard output. + + +\subsubsection{Failing} +\tacindex{fail} +\index{Tacticals!fail@{\tt fail}} + +The tactic {\tt fail} is the always-failing tactic: it does not solve +any goal. It is useful for defining other tacticals since it can be +catched by {\tt try} or {\tt match goal}. There are three variants: +\begin{quote} +{\tt fail $n$}, {\tt fail "message"} and {\tt fail $n$ "message"} +\end{quote} +The number $n$ is the failure level. If no level is specified, it +defaults to $0$. The level is used by {\tt try} and {\tt match goal}. +If $0$, it makes {\tt match goal} considering the next clause +(backtracking). If non zero, the current {\tt match goal} block or +{\tt try} command is aborted and the level is decremented. + +\ErrMsg \errindex{Tactic Failure "message" (level $n$)}. + +\subsubsection{Local definitions} +\index{Ltac!let} +\index{Ltac!let rec} +\index{let!in Ltac} +\index{let rec!in Ltac} + +Local definitions can be done as follows: +\begin{quote} +{\tt let} {\ident}$_1$ {\tt :=} {\tacexpr}$_1$\\ +{\tt with} {\ident}$_2$ {\tt :=} {\tacexpr}$_2$\\ +...\\ +{\tt with} {\ident}$_n$ {\tt :=} {\tacexpr}$_n$ {\tt in}\\ +{\tacexpr} +\end{quote} +each {\tacexpr}$_i$ is evaluated to $v_i$, then, {\tacexpr} is +evaluated by substituting $v_i$ to each occurrence of {\ident}$_i$, +for $i=1,...,n$. There is no dependencies between the {\tacexpr}$_i$ +and the {\ident}$_i$. + +Local definitions can be recursive by using {\tt let rec} instead of +{\tt let}. Only functions can be defined by recursion, so at least one +argument is required. 
+ +\subsubsection{Application} + +An application is an expression of the following form: +\begin{quote} +{\qualid} {\tacarg}$_1$ ... {\tacarg}$_n$ +\end{quote} +The reference {\qualid} must be bound to some defined tactic +definition expecting at least $n$ arguments. The expressions +{\tacexpr}$_i$ are evaluated to $v_i$, for $i=1,...,n$. +%If {\tacexpr} is a {\tt Fun} or {\tt Rec} value then the body is evaluated by +%substituting $v_i$ to the formal parameters, for $i=1,...,n$. For recursive +%clauses, the bodies are lazily substituted (when an identifier to be evaluated +%is the name of a recursive clause). + +%\subsection{Application of tactic values} + +\subsubsection{Function construction} +\index{fun!in Ltac} +\index{Ltac!fun} + +A parameterized tactic can be built anonymously (without resorting to +local definitions) with: +\begin{quote} +{\tt fun} {\ident${}_1$} ... {\ident${}_n$} {\tt =>} {\tacexpr} +\end{quote} +Indeed, local definitions of functions are a syntactic sugar for +binding a {\tt fun} tactic to an identifier. + +\subsubsection{Pattern matching on terms} +\index{Ltac!match} +\index{match!in Ltac} + +We can carry out pattern matching on terms with: +\begin{quote} +{\tt match} {\tacexpr} {\tt with}\\ +~~~{\cpattern}$_1$ {\tt =>} {\tacexpr}$_1$\\ +~{\tt |} {\cpattern}$_2$ {\tt =>} {\tacexpr}$_2$\\ +~...\\ +~{\tt |} {\cpattern}$_n$ {\tt =>} {\tacexpr}$_n$\\ +~{\tt |} {\tt \_} {\tt =>} {\tacexpr}$_{n+1}$\\ +{\tt end} +\end{quote} +The {\tacexpr} is evaluated and should yield a term which is matched +(non-linear first order unification) against {\cpattern}$_1$ then +{\tacexpr}$_1$ is evaluated into some value by substituting the +pattern matching instantiations to the metavariables. If the matching +with {\cpattern}$_1$ fails, {\cpattern}$_2$ is used and so on. The +pattern {\_} matches any term and shunts all remaining patterns if +any. 
If {\tacexpr}$_1$ evaluates to a tactic, this tactic is not +immediately applied to the current goal (in contrast with {\tt match +goal}). If all clauses fail (in particular, there is no pattern {\_}) +then a no-matching error is raised. + +\begin{ErrMsgs} + +\item \errindex{No matching clauses for match} + + No pattern can be used and, in particular, there is no {\tt \_} pattern. + +\item \errindex{Argument of match does not evaluate to a term} + + This happens when {\tacexpr} does not denote a term. + +\end{ErrMsgs} + +\index{context!in pattern} +There is a special form of patterns to match a subterm against the +pattern: +\begin{quote} +{\tt context} {\ident} {\tt [} {\cpattern} {\tt ]} +\end{quote} +It matches any term of which one subterm matches {\cpattern}. If there is +a match, the optional {\ident} is assigned the ``matched context'', that +is the initial term where the matched subterm is replaced by a +hole. The definition of {\tt context} in expressions below will show +how to use such term contexts. + +This operator never backtracks. If there are several subterms +matching the pattern, only the first match is considered. Note that +the order of matching is left unspecified. +%% TODO: clarify this point! 
It *should* be specified + + +\subsubsection{Pattern matching on goals} +\index{Ltac!match goal} +\index{Ltac!match reverse goal} +\index{match goal!in Ltac} +\index{match reverse goal!in Ltac} + +We can make pattern matching on goals using the following expression: +\begin{quote} +\begin{tabbing} +{\tt match goal with}\\ +~~\={\tt |} $hyp_{1,1}${\tt ,}...{\tt ,}$hyp_{1,m_1}$ + ~~{\tt |-}{\cpattern}$_1${\tt =>} {\tacexpr}$_1$\\ + \>{\tt |} $hyp_{2,1}${\tt ,}...{\tt ,}$hyp_{2,m_2}$ + ~~{\tt |-}{\cpattern}$_2${\tt =>} {\tacexpr}$_2$\\ +~~...\\ + \>{\tt |} $hyp_{n,1}${\tt ,}...{\tt ,}$hyp_{n,m_n}$ + ~~{\tt |-}{\cpattern}$_n${\tt =>} {\tacexpr}$_n$\\ + \>{\tt |\_}~~~~{\tt =>} {\tacexpr}$_{n+1}$\\ +{\tt end} +\end{tabbing} +\end{quote} + +% TODO: specify order of hypothesis and explain reverse... + +If each hypothesis pattern $hyp_{1,i}$, with $i=1,...,m_1$ +is matched (non-linear first order unification) by an hypothesis of +the goal and if {\cpattern}$_1$ is matched by the conclusion of the +goal, then {\tacexpr}$_1$ is evaluated to $v_1$ by substituting the +pattern matching to the metavariables and the real hypothesis names +bound to the possible hypothesis names occurring in the hypothesis +patterns. If $v_1$ is a tactic value, then it is applied to the +goal. If this application fails, then another combination of +hypotheses is tried with the same proof context pattern. If there is +no other combination of hypotheses then the second proof context +pattern is tried and so on. If the next to last proof context pattern +fails then {\tacexpr}$_{n+1}$ is evaluated to $v_{n+1}$ and $v_{n+1}$ +is applied. + +\ErrMsg \errindex{No matching clauses for match goal} + + No goal pattern can be used and, in particular, there is no {\tt + \_} goal pattern. + +\medskip + +It is important to know that each hypothesis of the goal can be +matched by at most one hypothesis pattern. 
The order of matching is +the following: hypothesis patterns are examined from the right to the +left (i.e. $hyp_{i,m_i}$ before $hyp_{i,1}$). For each hypothesis +pattern, the goal hypotheses are matched in order (fresher hypothesis +first), but it is possible to reverse this order (older first) with +the {\tt match reverse goal with} variant. + +\subsubsection{Filling a term context} +\index{context!in expression} + +The following expression is not a tactic in the sense that it does not +produce subgoals but generates a term to be used in tactic +expressions: +\begin{quote} +{\tt context} {\ident} {\tt [} {\tacexpr} {\tt ]} +\end{quote} +{\ident} must denote a context variable bound by a {\tt context} +pattern of a {\tt match} expression. Evaluating this expression +replaces the hole of the value of {\ident} by the value of +{\tacexpr}. + +\ErrMsg \errindex{not a context variable} + + +\subsubsection{Generating fresh hypothesis names} +\index{Ltac!fresh} +\index{fresh!in Ltac} + +Tactics sometimes have to generate new names for hypotheses. Letting +the system decide a name with the {\tt intro} tactic is not so good +since it is very awkward to retrieve the name the system gave. + +As before, the following expression returns a term: +\begin{quote} +{\tt fresh} {\qstring} +\end{quote} +It evaluates to an identifier unbound in the goal, which is obtained +by padding {\qstring} with a number if necessary. If no name is given, +the prefix is {\tt H}. + +\subsubsection{{\tt type of} {\term}} +%\tacindex{type of} +\index{Ltac!type of} +\index{type of!in Ltac} + +This tactic computes the type of {\term}. + +\subsubsection{Computing in a constr} +\index{Ltac!eval} +\index{eval!in Ltac} + +Evaluation of a term can be performed with: +\begin{quote} +{\tt eval} {\nterm{redexpr}} {\tt in} {\term} +\end{quote} +where \nterm{redexpr} is a reduction tactic among {\tt red}, {\tt +hnf}, {\tt compute}, {\tt simpl}, {\tt cbv}, {\tt lazy}, {\tt unfold}, +{\tt fold}, {\tt pattern}. 
+ + +\subsubsection{Accessing tactic decomposition} +\tacindex{info} +\index{Tacticals!info@{\tt info}} + +Tactical ``{\tt info} {\tacexpr}'' is not really a tactical. For +elementary tactics, this is equivalent to \tacexpr. For complex tactic +like \texttt{auto}, it displays the operations performed by the +tactic. + +\subsubsection{Proving a subgoal as a separate lemma} +\tacindex{abstract} +\index{Tacticals!abstract@{\tt abstract}} + +From the outside ``\texttt{abstract \tacexpr}'' is the same as +{\tt solve \tacexpr}. Internally it saves an auxiliary lemma called +{\ident}\texttt{\_subproof}\textit{n} where {\ident} is the name of the +current goal and \textit{n} is chosen so that this is a fresh name. + +This tactical is useful with tactics such as \texttt{omega} or +\texttt{discriminate} that generate huge proof terms. With that tool +the user can avoid the explosion at time of the \texttt{Save} command +without having to cut manually the proof in smaller lemmas. + +\begin{Variants} +\item \texttt{abstract {\tacexpr} using {\ident}}.\\ + Give explicitly the name of the auxiliary lemma. +\end{Variants} + +\ErrMsg \errindex{Proof is not complete} + +\section{Tactic toplevel definitions} +\comindex{Ltac} + +Basically, tactics toplevel definitions are made as follows: +%{\tt Tactic Definition} {\ident} {\tt :=} {\tacexpr}\\ +% +%{\tacexpr} is evaluated to $v$ and $v$ is associated to {\ident}. Next, every +%script is evaluated by substituting $v$ to {\ident}. +% +%We can define functional definitions by:\\ +\begin{quote} +{\tt Ltac} {\ident} {\ident}$_1$ ... {\ident}$_n$ {\tt :=} +{\tacexpr} +\end{quote} +This defines a new tactic that can be used in any tactic script or new +tactic toplevel definition. + +\Rem The preceding definition can equivalently be written: +\begin{quote} +{\tt Ltac} {\ident} {\tt := fun} {\ident}$_1$ ... 
{\ident}$_n$ +{\tt =>} {\tacexpr} +\end{quote} +Recursive and mutual recursive function definitions are also +possible with the syntax: +\begin{quote} +{\tt Ltac} {\ident}$_1$ {\ident}$_{1,1}$ ... +{\ident}$_{1,m_1}$~~{\tt :=} {\tacexpr}$_1$\\ +{\tt with} {\ident}$_2$ {\ident}$_{2,1}$ ... {\ident}$_{2,m_2}$~~{\tt :=} +{\tacexpr}$_2$\\ +...\\ +{\tt with} {\ident}$_n$ {\ident}$_{n,1}$ ... {\ident}$_{n,m_n}$~~{\tt :=} +{\tacexpr}$_n$ +\end{quote} + +%This definition bloc is a set of definitions (use of +%the same previous syntactical sugar) and the other scripts are evaluated as +%usual except that the substitutions are lazily carried out (when an identifier +%to be evaluated is the name of a recursive definition). + +\endinput + + +\subsection{Permutation on closed lists} + +\begin{figure}[b] +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example*} +Require Import List. +Section Sort. +Variable A : Set. +Inductive permut : list A -> list A -> Prop := + | permut_refl : forall l, permut l l + | permut_cons : + forall a l0 l1, permut l0 l1 -> permut (a :: l0) (a :: l1) + | permut_append : forall a l, permut (a :: l) (l ++ a :: nil) + | permut_trans : + forall l0 l1 l2, permut l0 l1 -> permut l1 l2 -> permut l0 l2. +End Sort. +\end{coq_example*} +\end{center} +\caption{Definition of the permutation predicate} +\label{permutpred} +\end{figure} + + +Another more complex example is the problem of permutation on closed +lists. The aim is to show that a closed list is a permutation of +another one. First, we define the permutation predicate as shown on +Figure~\ref{permutpred}. 
+ +\begin{figure}[p] +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example} +Ltac Permut n := + match goal with + | |- (permut _ ?l ?l) => apply permut_refl + | |- (permut _ (?a :: ?l1) (?a :: ?l2)) => + let newn := eval compute in (length l1) in + (apply permut_cons; Permut newn) + | |- (permut ?A (?a :: ?l1) ?l2) => + match eval compute in n with + | 1 => fail + | _ => + let l1' := constr:(l1 ++ a :: nil) in + (apply (permut_trans A (a :: l1) l1' l2); + [ apply permut_append | compute; Permut (pred n) ]) + end + end. +Ltac PermutProve := + match goal with + | |- (permut _ ?l1 ?l2) => + match eval compute in (length l1 = length l2) with + | (?n = ?n) => Permut n + end + end. +\end{coq_example} +\end{minipage}} +\end{center} +\caption{Permutation tactic} +\label{permutltac} +\end{figure} + +\begin{figure}[p] +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example*} +Lemma permut_ex1 : + permut nat (1 :: 2 :: 3 :: nil) (3 :: 2 :: 1 :: nil). +Proof. +PermutProve. +Qed. + +Lemma permut_ex2 : + permut nat + (0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 :: nil) + (0 :: 2 :: 4 :: 6 :: 8 :: 9 :: 7 :: 5 :: 3 :: 1 :: nil). +Proof. +PermutProve. +Qed. +\end{coq_example*} +\end{minipage}} +\end{center} +\caption{Examples of {\tt PermutProve} use} +\label{permutlem} +\end{figure} + +Next, we can write naturally the tactic and the result can be seen on +Figure~\ref{permutltac}. We can notice that we use two toplevel +definitions {\tt PermutProve} and {\tt Permut}. The function to be +called is {\tt PermutProve} which computes the lengths of the two +lists and calls {\tt Permut} with the length if the two lists have the +same length. {\tt Permut} works as expected. If the two lists are +equal, it concludes. Otherwise, if the lists have identical first +elements, it applies {\tt Permut} on the tail of the lists. 
Finally, +if the lists have different first elements, it puts the first element +of one of the lists (here the second one which appears in the {\tt + permut} predicate) at the end if that is possible, i.e., if the new +first element has been at this place previously. To verify that all +rotations have been done for a list, we use the length of the list as +an argument for {\tt Permut} and this length is decremented for each +rotation down to, but not including, 1 because for a list of length +$n$, we can make exactly $n-1$ rotations to generate at most $n$ +distinct lists. Here, it must be noticed that we use the natural +numbers of {\Coq} for the rotation counter. On Figure~\ref{ltac}, we +can see that it is possible to use usual natural numbers but they are +only used as arguments for primitive tactics and they cannot be +handled, in particular, we cannot make computations with them. So, a +natural choice is to use {\Coq} data structures so that {\Coq} makes +the computations (reductions) by {\tt eval compute in} and we can get +the terms back by {\tt match}. + +With {\tt PermutProve}, we can now prove lemmas such those shown on +Figure~\ref{permutlem}. + + +\subsection{Deciding intuitionistic propositional logic} + +\begin{figure}[tbp] +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example} +Ltac Axioms := + match goal with + | |- True => trivial + | _:False |- _ => elimtype False; assumption + | _:?A |- ?A => auto + end. 
+Ltac DSimplif := + repeat + (intros; + match goal with + | id:(~ _) |- _ => red in id + | id:(_ /\ _) |- _ => + elim id; do 2 intro; clear id + | id:(_ \/ _) |- _ => + elim id; intro; clear id + | id:(?A /\ ?B -> ?C) |- _ => + cut (A -> B -> C); + [ intro | intros; apply id; split; assumption ] + | id:(?A \/ ?B -> ?C) |- _ => + cut (B -> C); + [ cut (A -> C); + [ intros; clear id + | intro; apply id; left; assumption ] + | intro; apply id; right; assumption ] + | id0:(?A -> ?B),id1:?A |- _ => + cut B; [ intro; clear id0 | apply id0; assumption ] + | |- (_ /\ _) => split + | |- (~ _) => red + end). +\end{coq_example} +\end{minipage}} +\end{center} +\caption{Deciding intuitionistic propositions (1)} +\label{tautoltaca} +\end{figure} + +\begin{figure} +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example} +Ltac TautoProp := + DSimplif; + Axioms || + match goal with + | id:((?A -> ?B) -> ?C) |- _ => + cut (B -> C); + [ intro; cut (A -> B); + [ intro; cut C; + [ intro; clear id | apply id; assumption ] + | clear id ] + | intro; apply id; intro; assumption ]; TautoProp + | id:(~ ?A -> ?B) |- _ => + cut (False -> B); + [ intro; cut (A -> False); + [ intro; cut B; + [ intro; clear id | apply id; assumption ] + | clear id ] + | intro; apply id; red; intro; assumption ]; TautoProp + | |- (_ \/ _) => (left; TautoProp) || (right; TautoProp) + end. +\end{coq_example} +\end{minipage}} +\end{center} +\caption{Deciding intuitionistic propositions (2)} +\label{tautoltacb} +\end{figure} + +The pattern matching on goals allows a complete and so a powerful +backtracking when returning tactic values. An interesting application +is the problem of deciding intuitionistic propositional logic. +Considering the contraction-free sequent calculi {\tt LJT*} of +Roy~Dyckhoff (\cite{Dyc92}), it is quite natural to code such a tactic +using the tactic language. On Figure~\ref{tautoltaca}, the tactic {\tt + Axioms} tries to conclude using usual axioms. 
The {\tt DSimplif} +tactic applies all the reversible rules of Dyckhoff's system. +Finally, on Figure~\ref{tautoltacb}, the {\tt TautoProp} tactic (the +main tactic to be called) simplifies with {\tt DSimplif}, tries to +conclude with {\tt Axioms} and tries several paths using the +backtracking rules (one of the four Dyckhoff's rules for the left +implication to get rid of the contraction and the right or). + +\begin{figure}[tb] +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example*} +Lemma tauto_ex1 : forall A B:Prop, A /\ B -> A \/ B. +Proof. +TautoProp. +Qed. + +Lemma tauto_ex2 : + forall A B:Prop, (~ ~ B -> B) -> (A -> B) -> ~ ~ A -> B. +Proof. +TautoProp. +Qed. +\end{coq_example*} +\end{minipage}} +\end{center} +\caption{Proofs of tautologies with {\tt TautoProp}} +\label{tautolem} +\end{figure} + +For example, with {\tt TautoProp}, we can prove tautologies like those of +Figure~\ref{tautolem}. + + +\subsection{Deciding type isomorphisms} + +A more tricky problem is to decide equalities between types and modulo +isomorphisms. Here, we choose to use the isomorphisms of the simply typed +$\lb{}$-calculus with Cartesian product and $unit$ type (see, for example, +\cite{RC95}). The axioms of this $\lb{}$-calculus are given by +Figure~\ref{isosax}. + +\begin{figure} +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_eval} +Reset Initial. +\end{coq_eval} +\begin{coq_example*} +Open Scope type_scope. +Section Iso_axioms. +Variables A B C : Set. +Axiom Com : A * B = B * A. +Axiom Ass : A * (B * C) = A * B * C. +Axiom Cur : (A * B -> C) = (A -> B -> C). +Axiom Dis : (A -> B * C) = (A -> B) * (A -> C). +Axiom P_unit : A * unit = A. +Axiom AR_unit : (A -> unit) = unit. +Axiom AL_unit : (unit -> A) = A. +Lemma Cons : B = C -> A * B = A * C. +Proof. +intro Heq; rewrite Heq; apply refl_equal. +Qed. +End Iso_axioms. 
+\end{coq_example*} +\end{minipage}} +\end{center} +\caption{Type isomorphism axioms} +\label{isosax} +\end{figure} + +The tactic to judge equalities modulo this axiomatization can be written as +shown on Figures~\ref{isosltac1} and~\ref{isosltac2}. The algorithm is quite +simple. Types are reduced using axioms that can be oriented (this done by {\tt +MainSimplif}). The normal forms are sequences of Cartesian +products without Cartesian product in the left component. These normal forms +are then compared modulo permutation of the components (this is done by {\tt +CompareStruct}). The main tactic to be called and realizing this algorithm is +{\tt IsoProve}. + +\begin{figure} +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example} +Ltac DSimplif trm := + match trm with + | (?A * ?B * ?C) => + rewrite <- (Ass A B C); try MainSimplif + | (?A * ?B -> ?C) => + rewrite (Cur A B C); try MainSimplif + | (?A -> ?B * ?C) => + rewrite (Dis A B C); try MainSimplif + | (?A * unit) => + rewrite (P_unit A); try MainSimplif + | (unit * ?B) => + rewrite (Com unit B); try MainSimplif + | (?A -> unit) => + rewrite (AR_unit A); try MainSimplif + | (unit -> ?B) => + rewrite (AL_unit B); try MainSimplif + | (?A * ?B) => + (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) + | (?A -> ?B) => + (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) + end + with MainSimplif := + match goal with + | |- (?A = ?B) => try DSimplif A; try DSimplif B + end. +Ltac Length trm := + match trm with + | (_ * ?B) => let succ := Length B in constr:(S succ) + | _ => constr:1 + end. +Ltac assoc := repeat rewrite <- Ass. 
+\end{coq_example} +\end{minipage}} +\end{center} +\caption{Type isomorphism tactic (1)} +\label{isosltac1} +\end{figure} + +\begin{figure} +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example} +Ltac DoCompare n := + match goal with + | [ |- (?A = ?A) ] => apply refl_equal + | [ |- (?A * ?B = ?A * ?C) ] => + apply Cons; let newn := Length B in DoCompare newn + | [ |- (?A * ?B = ?C) ] => + match eval compute in n with + | 1 => fail + | _ => + pattern (A * B) at 1; rewrite Com; assoc; DoCompare (pred n) + end + end. +Ltac CompareStruct := + match goal with + | [ |- (?A = ?B) ] => + let l1 := Length A + with l2 := Length B in + match eval compute in (l1 = l2) with + | (?n = ?n) => DoCompare n + end + end. +Ltac IsoProve := MainSimplif; CompareStruct. +\end{coq_example} +\end{minipage}} +\end{center} +\caption{Type isomorphism tactic (2)} +\label{isosltac2} +\end{figure} + +Figure~\ref{isoslem} gives examples of what can be solved by {\tt IsoProve}. + +\begin{figure} +\begin{center} +\fbox{\begin{minipage}{0.95\textwidth} +\begin{coq_example*} +Lemma isos_ex1 : + forall A B:Set, A * unit * B = B * (unit * A). +Proof. +intros; IsoProve. +Qed. + +Lemma isos_ex2 : + forall A B C:Set, + (A * unit -> B * (C * unit)) = + (A * unit -> (C -> unit) * C) * (unit -> A -> B). +Proof. +intros; IsoProve. +Qed. +\end{coq_example*} +\end{minipage}} +\end{center} +\caption{Type equalities solved by {\tt IsoProve}} +\label{isoslem} +\end{figure} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-mod.tex b/doc/refman/RefMan-mod.tex new file mode 100644 index 00000000..9f6f2abc --- /dev/null +++ b/doc/refman/RefMan-mod.tex @@ -0,0 +1,396 @@ +\section{Module system +\index{Modules} +\label{section:Modules}} + +The module system provides a way of packaging related elements +together, as well as a mean of massive abstraction. 
+ +\begin{figure}[t] +\begin{centerframe} +\begin{tabular}{rcl} +{\modtype} & ::= & {\ident} \\ + & $|$ & {\modtype} \texttt{ with Definition }{\ident} := {\term} \\ + & $|$ & {\modtype} \texttt{ with Module }{\ident} := {\qualid} \\ + &&\\ + +{\onemodbinding} & ::= & {\tt ( \nelist{\ident}{} : {\modtype} )}\\ + &&\\ + +{\modbindings} & ::= & \nelist{\onemodbinding}{}\\ + &&\\ + +{\modexpr} & ::= & \nelist{\qualid}{} +\end{tabular} +\end{centerframe} +\caption{Syntax of modules} +\end{figure} + +\subsection{\tt Module {\ident} +\comindex{Module}} + +This command is used to start an interactive module named {\ident}. + +\begin{Variants} + +\item{\tt Module {\ident} {\modbindings}} + + Starts an interactive functor with parameters given by {\modbindings}. + +\item{\tt Module {\ident} \verb.:. {\modtype}} + + Starts an interactive module specifying its module type. + +\item{\tt Module {\ident} {\modbindings} \verb.:. {\modtype}} + + Starts an interactive functor with parameters given by + {\modbindings}, and output module type {\modtype}. + +\item{\tt Module {\ident} \verb.<:. {\modtype}} + + Starts an interactive module satisfying {\modtype}. + +\item{\tt Module {\ident} {\modbindings} \verb.<:. {\modtype}} + + Starts an interactive functor with parameters given by + {\modbindings}. The output module type is verified against the + module type {\modtype}. + +\end{Variants} + +\subsection{\tt End {\ident} +\comindex{End}} + +This command closes the interactive module {\ident}. If the module type +was given the content of the module is matched against it and an error +is signaled if the matching fails. If the module is basic (is not a +functor) its components (constants, inductive types, submodules etc) are +now available through the dot notation. 
+ +\begin{ErrMsgs} +\item \errindex{No such label {\ident}} +\item \errindex{Signature components for label {\ident} do not match} +\item \errindex{This is not the last opened module} +\end{ErrMsgs} + + +\subsection{\tt Module {\ident} := {\modexpr} +\comindex{Module}} + +This command defines the module identifier {\ident} to be equal to +{\modexpr}. + +\begin{Variants} +\item{\tt Module {\ident} {\modbindings} := {\modexpr}} + + Defines a functor with parameters given by {\modbindings} and body {\modexpr}. + +% Particular cases of the next 2 items +%\item{\tt Module {\ident} \verb.:. {\modtype} := {\modexpr}} +% +% Defines a module with body {\modexpr} and interface {\modtype}. +%\item{\tt Module {\ident} \verb.<:. {\modtype} := {\modexpr}} +% +% Defines a module with body {\modexpr}, satisfying {\modtype}. + +\item{\tt Module {\ident} {\modbindings} \verb.:. {\modtype} := + {\modexpr}} + + Defines a functor with parameters given by {\modbindings} (possibly none), + and output module type {\modtype}, with body {\modexpr}. + +\item{\tt Module {\ident} {\modbindings} \verb.<:. {\modtype} := + {\modexpr}} + + Defines a functor with parameters given by {\modbindings} (possibly none) + with body {\modexpr}. The body is checked against {\modtype}. + +\end{Variants} + +\subsection{\tt Module Type {\ident} +\comindex{Module Type}} + +This command is used to start an interactive module type {\ident}. + +\begin{Variants} + +\item{\tt Module Type {\ident} {\modbindings}} + + Starts an interactive functor type with parameters given by {\modbindings}. + +\end{Variants} + +\subsection{\tt End {\ident} +\comindex{End}} + +This command closes the interactive module type {\ident}. + +\begin{ErrMsgs} +\item \errindex{This is not the last opened module type} +\end{ErrMsgs} + +\subsection{\tt Module Type {\ident} := {\modtype}} + +Defines a module type {\ident} equal to {\modtype}. 
+ +\begin{Variants} +\item {\tt Module Type {\ident} {\modbindings} := {\modtype}} + + Defines a functor type {\ident} specifying functors taking arguments + {\modbindings} and returning {\modtype}. +\end{Variants} + +\subsection{\tt Declare Module {\ident}} + +Starts an interactive module declaration. This command is available +only in module types. + +\begin{Variants} + +\item{\tt Declare Module {\ident} {\modbindings}} + + Starts an interactive declaration of a functor with parameters given + by {\modbindings}. + +% Particular case of the next item +%\item{\tt Declare Module {\ident} \verb.<:. {\modtype}} +% +% Starts an interactive declaration of a module satisfying {\modtype}. + +\item{\tt Declare Module {\ident} {\modbindings} \verb.<:. {\modtype}} + + Starts an interactive declaration of a functor with parameters given + by {\modbindings} (possibly none). The declared output module type is + verified against the module type {\modtype}. + +\end{Variants} + +\subsection{\tt End {\ident}} + +This command closes the interactive declaration of module {\ident}. + +\subsection{\tt Declare Module {\ident} : {\modtype}} + +Declares a module of {\ident} of type {\modtype}. This command is available +only in module types. + +\begin{Variants} + +\item{\tt Declare Module {\ident} {\modbindings} \verb.:. {\modtype}} + + Declares a functor with parameters {\modbindings} and output module + type {\modtype}. + +\item{\tt Declare Module {\ident} := {\qualid}} + + Declares a module equal to the module {\qualid}. + +\item{\tt Declare Module {\ident} \verb.<:. {\modtype} := {\qualid}} + + Declares a module equal to the module {\qualid}, verifying that the + module type of the latter is a subtype of {\modtype}. + +\end{Variants} + + +\subsubsection{Example} + +Let us define a simple module. +\begin{coq_example} +Module M. + Definition T := nat. + Definition x := 0. + Definition y : bool. + exact true. + Defined. +End M. 
+\end{coq_example} +Inside a module one can define constants, prove theorems and do any +other things that can be done in the toplevel. Components of a closed +module can be accessed using the dot notation: +\begin{coq_example} +Print M.x. +\end{coq_example} +A simple module type: +\begin{coq_example} +Module Type SIG. + Parameter T : Set. + Parameter x : T. +End SIG. +\end{coq_example} +Inside a module type the proof editing mode is not available. +Consequently commands like \texttt{Definition}\ without body, +\texttt{Lemma}, \texttt{Theorem} are not allowed. In order to declare +constants, use \texttt{Axiom} and \texttt{Parameter}. + +Now we can create a new module from \texttt{M}, giving it a less +precise specification: the \texttt{y} component is dropped as well +as the body of \texttt{x}. + +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is not correct and should produce **********) +(***************** Error: N.y not a defined object *******************) +\end{coq_eval} +\begin{coq_example} +Module N : SIG with Definition T := nat := M. +Print N.T. +Print N.x. +Print N.y. +\end{coq_example} +\begin{coq_eval} +Reset N. +\end{coq_eval} + +\noindent +The definition of \texttt{N} using the module type expression +\texttt{SIG with Definition T:=nat} is equivalent to the following +one: + +\begin{coq_example*} +Module Type SIG'. + Definition T : Set := nat. + Parameter x : T. +End SIG'. +Module N : SIG' := M. +\end{coq_example*} +If we just want to be sure that the our implementation satisfies a +given module type without restricting the interface, we can use a +transparent constraint +\begin{coq_example} +Module P <: SIG := M. +Print P.y. +\end{coq_example} +Now let us create a functor, i.e. a parametric module +\begin{coq_example} +Module Two (X Y: SIG). +\end{coq_example} +\begin{coq_example*} + Definition T := (X.T * Y.T)%type. + Definition x := (X.x, Y.x). +\end{coq_example*} +\begin{coq_example} +End Two. 
+\end{coq_example} +and apply it to our modules and do some computations +\begin{coq_example} +Module Q := Two M N. +Eval compute in (fst Q.x + snd Q.x). +\end{coq_example} +In the end, let us define a module type with two sub-modules, sharing +some of the fields and give one of its possible implementations: +\begin{coq_example} +Module Type SIG2. + Declare Module M1 : SIG. + Declare Module M2 <: SIG. + Definition T := M1.T. + Parameter x : T. + End M2. +End SIG2. +\end{coq_example} +\begin{coq_example*} +Module Mod <: SIG2. + Module M1. + Definition T := nat. + Definition x := 1. + End M1. + Module M2 := M. +\end{coq_example*} +\begin{coq_example} +End Mod. +\end{coq_example} +Notice that \texttt{M} is a correct body for the component \texttt{M2} +since its \texttt{T} component is equal \texttt{nat} and hence +\texttt{M1.T} as specified. +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{Remarks} +\item Modules and module types can be nested components of each other. +\item When a module declaration is started inside a module type, + the proof editing mode is still unavailable. +\item One can have sections inside a module or a module type, but + not a module or a module type inside a section. +\item Commands like \texttt{Hint} or \texttt{Notation} can + also appear inside modules and module types. Note that in case of a + module definition like: + + \medskip + \noindent + {\tt Module N : SIG := M.} + \medskip + + or + + \medskip + {\tt Module N : SIG.\\ + \ \ \dots\\ + End N.} + \medskip + + hints and the like valid for \texttt{N} are not those defined in + \texttt{M} (or the module body) but the ones defined in + \texttt{SIG}. + +\end{Remarks} + +\subsection{Import {\qualid} +\comindex{Import} +\label{Import}} + +If {\qualid} denotes a valid basic module (i.e. its module type is a +signature), makes its components available by their short names. + +Example: + +\begin{coq_example} +Module Mod. +\end{coq_example} +\begin{coq_example} + Definition T:=nat. 
+ Check T. +\end{coq_example} +\begin{coq_example} +End Mod. +Check Mod.T. +Check T. (* Incorrect ! *) +Import Mod. +Check T. (* Now correct *) +\end{coq_example} +\begin{coq_eval} +Reset Mod. +\end{coq_eval} + + +\begin{Variants} +\item{\tt Export {\qualid}}\comindex{Export} + + When the module containing the command {\tt Export {\qualid}} is + imported, {\qualid} is imported as well. +\end{Variants} + +\begin{ErrMsgs} + \item \errindexbis{{\qualid} is not a module}{is not a module} +% this error is impossible in the import command +% \item \errindex{Cannot mask the absolute name {\qualid} !} +\end{ErrMsgs} + +\begin{Warnings} + \item Warning: Trying to mask the absolute name {\qualid} ! +\end{Warnings} + +\subsection{\tt Print Module {\ident} +\comindex{Print Module}} + +Prints the module type and (optionally) the body of the module {\ident}. + +\subsection{\tt Print Module Type {\ident} +\comindex{Print Module Type}} + +Prints the module type corresponding to {\ident}. + + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-modr.tex b/doc/refman/RefMan-modr.tex new file mode 100644 index 00000000..a52c1847 --- /dev/null +++ b/doc/refman/RefMan-modr.tex @@ -0,0 +1,586 @@ +\chapter{The Module System} +\label{chapter:Modules} + +The module system extends the Calculus of Inductive Constructions +providing a convenient way to structure large developments as well as +a mean of massive abstraction. +%It is described in details in Judicael's thesis and Jacek's thesis + +\section{Modules and module types} + +\paragraph{Access path.} It is denoted by $p$, it can be either a module +variable $X$ or, if $p'$ is an access path and $id$ an identifier, then +$p'.id$ is an access path. + +\paragraph{Structure element.} It is denoted by \Impl\ and is either a +definition of a constant, an assumption, a definition of an inductive +or a definition of a module or a module type abbreviation. 
+ +\paragraph{Module expression.} It is denoted by $M$ and can be: +\begin{itemize} +\item an access path $p$ +\item a structure $\struct{\nelist{\Impl}{;}}$ +\item a functor $\functor{X}{T}{M'}$, where $X$ is a module variable, + $T$ is a module type and $M'$ is a module expression +\item an application of access paths $p' p''$ +\end{itemize} + +\paragraph{Signature element.} It is denoted by \Spec, it is a +specification of a constant, an assumption, an inductive, a module or +a module type abbreviation. + +\paragraph{Module type,} denoted by $T$ can be: +\begin{itemize} +\item a module type name +\item an access path $p$ +\item a signature $\sig{\nelist{\Spec}{;}}$ +\item a functor type $\funsig{X}{T'}{T''}$, where $T'$ and $T''$ are + module types +\end{itemize} + +\paragraph{Module definition,} written $\Mod{X}{T}{M}$ can be a +structure element. It consists of a module variable $X$, a module type +$T$ and a module expression $M$. + +\paragraph{Module specification,} written $\ModS{X}{T}$ or +$\ModSEq{X}{T}{p}$ can be a signature element or a part of an +environment. It consists of a module variable $X$, a module type $T$ +and, optionally, a module path $p$. + +\paragraph{Module type abbreviation,} written $\ModType{S}{T}$, where +$S$ is a module type name and $T$ is a module type. + + +\section{Typing Modules} + +In order to introduce the typing system we first slightly extend +the syntactic class of terms and environments given in +section~\ref{Terms}. The environments, apart from definitions of +constants and inductive types now also hold any other signature elements. +Terms, apart from variables, constants and complex terms, +include also access paths. + +We also need additional typing judgments: +\begin{itemize} +\item \WFT{E}{T}, denoting that a module type $T$ is well-formed, + +\item \WTM{E}{M}{T}, denoting that a module expression $M$ has type $T$ in +environment $E$. 
+ +\item \WTM{E}{\Impl}{\Spec}, denoting that an implementation $\Impl$ + verifies a specification $\Spec$ + +\item \WS{E}{T_1}{T_2}, denoting that a module type $T_1$ is a subtype of a +module type $T_2$. + +\item \WS{E}{\Spec_1}{\Spec_2}, denoting that a specification + $\Spec_1$ is more precise that a specification $\Spec_2$. +\end{itemize} +The rules for forming module types are the following: +\begin{description} +\item[WF-SIG] +\inference{% + \frac{ + \WF{E;E'}{} + }{%%%%%%%%%%%%%%%%%%%%% + \WFT{E}{\sig{E'}} + } +} +\item[WF-FUN] +\inference{% + \frac{ + \WFT{E;\ModS{X}{T}}{T'} + }{%%%%%%%%%%%%%%%%%%%%%%%%%% + \WFT{E}{\funsig{X}{T}{T'}} + } +} +\end{description} +Rules for typing module expressions: +\begin{description} +\item[MT-STRUCT] +\inference{% + \frac{ + \begin{array}{c} + \WFT{E}{\sig{\Spec_1;\dots;\Spec_n}}\\ + \WTM{E;\Spec_1;\dots;\Spec_{i-1}}{\Impl_i}{\Spec_i} + \textrm{ \ \ for } i=1\dots n + \end{array} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WTM{E}{\struct{\Impl_1;\dots;\Impl_n}}{\sig{\Spec_1;\dots;\Spec_n}} + } +} +\item[MT-FUN] +\inference{% + \frac{ + \WTM{E;\ModS{X}{T}}{M}{T'} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WTM{E}{\functor{X}{T}{M}}{\funsig{X}{T}{T'}} + } +} +\item[MT-APP] +\inference{% + \frac{ + \begin{array}{c} + \WTM{E}{p}{\funsig{X_1}{T_1}{\!\dots\funsig{X_n}{T_n}{T'}}}\\ + \WTM{E}{p_i}{T_i\{X_1/p_1\dots X_{i-1}/p_{i-1}\}} + \textrm{ \ \ for } i=1\dots n + \end{array} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WTM{E}{p\; p_1 \dots p_n}{T'\{X_1/p_1\dots X_n/p_n\}} + } +} +\item[MT-SUB] +\inference{% + \frac{ + \WTM{E}{M}{T}~~~~~~~~~~~~\WS{E}{T}{T'} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WTM{E}{M}{T'} + } +} +\item[MT-STR] +\inference{% + \frac{ + \WTM{E}{p}{T} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WTM{E}{p}{T/p} + } +} +\end{description} +The last rule, called strengthening is used to make all module fields +manifestly equal to themselves. 
The notation $T/p$ has the following +meaning: +\begin{itemize} +\item if $T=\sig{\Spec_1;\dots;\Spec_n}$ then + $T/p=\sig{\Spec_1/p;\dots;\Spec_n/p}$ where $\Spec/p$ is defined as + follows: + \begin{itemize} + \item $\Def{}{c}{U}{t}/p ~=~ \Def{}{c}{U}{t}$ + \item $\Assum{}{c}{U}/p ~=~ \Def{}{c}{p.c}{U}$ + \item $\ModS{X}{T}/p ~=~ \ModSEq{X}{T/p.X}{p.X}$ + \item $\ModSEq{X}{T}{p'}/p ~=~ \ModSEq{X}{T/p}{p'}$ + \item $\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}/p ~=~ \Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ + \item $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'}/p ~=~ \Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'}$ + \end{itemize} +\item if $T=\funsig{X}{T'}{T''}$ then $T/p=T$ +\item if $T$ is an access path or a module type name, then we have to + unfold its definition and proceed according to the rules above. +\end{itemize} +The notation $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ denotes an +inductive definition that is definitionally equal to the inductive +definition in the module denoted by the path $p$. All rules which have +$\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}$ as premises are also valid for +$\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$. We give the formation rule +for $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ below as well as +the equality rules on inductive types and constructors. 
+ +The module subtyping rules: +\begin{description} +\item[MSUB-SIG] +\inference{% + \frac{ + \begin{array}{c} + \WS{E;\Spec_1;\dots;\Spec_n}{\Spec_{\sigma(i)}}{\Spec'_i} + \textrm{ \ for } i=1..m \\ + \sigma : \{1\dots m\} \ra \{1\dots n\} \textrm{ \ injective} + \end{array} + }{ + \WS{E}{\sig{\Spec_1;\dots;\Spec_n}}{\sig{\Spec'_1;\dots;\Spec'_m}} + } +} +\item[MSUB-FUN] +\inference{% T_1 -> T_2 <: T_1' -> T_2' + \frac{ + \WS{E}{T_1'}{T_1}~~~~~~~~~~\WS{E;\ModS{X}{T_1'}}{T_2}{T_2'} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WS{E}{\funsig{X}{T_1}{T_2}}{\funsig{X}{T_1'}{T_2'}} + } +} +% these are derived rules +% \item[MSUB-EQ] +% \inference{% +% \frac{ +% \WS{E}{T_1}{T_2}~~~~~~~~~~\WTERED{}{T_1}{=}{T_1'}~~~~~~~~~~\WTERED{}{T_2}{=}{T_2'} +% }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% \WS{E}{T_1'}{T_2'} +% } +% } +% \item[MSUB-REFL] +% \inference{% +% \frac{ +% \WFT{E}{T} +% }{ +% \WS{E}{T}{T} +% } +% } +\end{description} +Specification subtyping rules: +\begin{description} +\item[ASSUM-ASSUM] +\inference{% + \frac{ + \WTELECONV{}{U_1}{U_2} + }{ + \WSE{\Assum{}{c}{U_1}}{\Assum{}{c}{U_2}} + } +} +\item[DEF-ASSUM] +\inference{% + \frac{ + \WTELECONV{}{U_1}{U_2} + }{ + \WSE{\Def{}{c}{t}{U_1}}{\Assum{}{c}{U_2}} + } +} +\item[ASSUM-DEF] +\inference{% + \frac{ + \WTELECONV{}{U_1}{U_2}~~~~~~~~\WTECONV{}{c}{t_2} + }{ + \WSE{\Assum{}{c}{U_1}}{\Def{}{c}{t_2}{U_2}} + } +} +\item[DEF-DEF] +\inference{% + \frac{ + \WTELECONV{}{U_1}{U_2}~~~~~~~~\WTECONV{}{t_1}{t_2} + }{ + \WSE{\Def{}{c}{t_1}{U_1}}{\Def{}{c}{t_2}{U_2}} + } +} +\item[IND-IND] +\inference{% + \frac{ + \WTECONV{}{\Gamma_P}{\Gamma_P'}% + ~~~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% + ~~~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% + }{ + \WSE{\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}}% + {\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}} + } +} +\item[INDP-IND] +\inference{% + \frac{ + \WTECONV{}{\Gamma_P}{\Gamma_P'}% + ~~~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% + 
~~~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% + }{ + \WSE{\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}% + {\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}} + } +} +\item[INDP-INDP] +\inference{% + \frac{ + \WTECONV{}{\Gamma_P}{\Gamma_P'}% + ~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% + ~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% + ~~~~~~\WTECONV{}{p}{p'} + }{ + \WSE{\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}% + {\Indp{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}{p'}} + } +} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\item[MODS-MODS] +\inference{% + \frac{ + \WSE{T_1}{T_2} + }{ + \WSE{\ModS{m}{T_1}}{\ModS{m}{T_2}} + } +} +\item[MODEQ-MODS] +\inference{% + \frac{ + \WSE{T_1}{T_2} + }{ + \WSE{\ModSEq{m}{T_1}{p}}{\ModS{m}{T_2}} + } +} +\item[MODS-MODEQ] +\inference{% + \frac{ + \WSE{T_1}{T_2}~~~~~~~~\WTECONV{}{m}{p_2} + }{ + \WSE{\ModS{m}{T_1}}{\ModSEq{m}{T_2}{p_2}} + } +} +\item[MODEQ-MODEQ] +\inference{% + \frac{ + \WSE{T_1}{T_2}~~~~~~~~\WTECONV{}{p_1}{p_2} + }{ + \WSE{\ModSEq{m}{T_1}{p_1}}{\ModSEq{m}{T_2}{p_2}} + } +} +\item[MODTYPE-MODTYPE] +\inference{% + \frac{ + \WSE{T_1}{T_2}~~~~~~~~\WSE{T_2}{T_1} + }{ + \WSE{\ModType{S}{T_1}}{\ModType{S}{T_2}} + } +} +\end{description} +Verification of the specification +\begin{description} +\item[IMPL-SPEC] +\inference{% + \frac{ + \begin{array}{c} + \WF{E;\Spec}{}\\ + \Spec \textrm{\ is one of } {\sf Def, Assum, Ind, ModType} + \end{array} + }{ + \WTE{}{\Spec}{\Spec} + } +} +\item[MOD-MODS] +\inference{% + \frac{ + \WF{E;\ModS{m}{T}}{}~~~~~~~~\WTE{}{M}{T} + }{ + \WTE{}{\Mod{m}{T}{M}}{\ModS{m}{T}} + } +} +\item[MOD-MODEQ] +\inference{% + \frac{ + \WF{E;\ModSEq{m}{T}{p}}{}~~~~~~~~~~~\WTECONV{}{p}{p'} + }{ + \WTE{}{\Mod{m}{T}{p'}}{\ModSEq{m}{T}{p'}} + } +} +\end{description} +New environment formation rules +\begin{description} +\item[WF-MODS] +\inference{% + \frac{ + \WF{E}{}~~~~~~~~\WFT{E}{T} + }{ + \WF{E;\ModS{m}{T}}{} + } +} +\item[WF-MODEQ] +\inference{% + \frac{ + \WF{E}{}~~~~~~~~~~~\WTE{}{p}{T} + }{ + 
\WF{E,\ModSEq{m}{T}{p}}{} + } +} +\item[WF-MODTYPE] +\inference{% + \frac{ + \WF{E}{}~~~~~~~~~~~\WFT{E}{T} + }{ + \WF{E,\ModType{S}{T}}{} + } +} +\item[WF-IND] +\inference{% + \frac{ + \begin{array}{c} + \WF{E;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}}{}\\ + \WT{E}{}{p:\sig{\Spec_1;\dots;\Spec_n;\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'};\dots}}\\ + \WS{E}{\subst{\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}}{p.l}{l}_{l + \in \lab{Spec_1;\dots;Spec_n}}}{\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}} + \end{array} + }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + \WF{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{} + } +} +\end{description} +Component access rules +\begin{description} +\item[ACC-TYPE] +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\Assum{}{c}{U};\dots}} + }{ + \WTEG{p.c}{\subst{U}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\\ +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\Def{}{c}{t}{U};\dots}} + }{ + \WTEG{p.c}{\subst{U}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\item[ACC-DELTA] +Notice that the following rule extends the delta rule defined in +section~\ref{delta} +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\Def{}{c}{t}{U};\dots}} + }{ + \WTEGRED{p.c}{\triangleright_\delta}{\subst{t}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\\ +In the rules below we assume $\Gamma_P$ is $[p_1:P_1;\ldots;p_r:P_r]$, + $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is + $[c_1:C_1;\ldots;c_n:C_n]$ +\item[ACC-IND] +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;\Spec_i;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I};\dots}} + }{ + \WTEG{p.I_j}{\subst{(p_1:P_1)\ldots(p_r:P_r)A_j}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;\Spec_i;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I};\dots}} + }{ + \WTEG{p.c_m}{\subst{(p_1:P_1)\ldots(p_r:P_r)\subst{C_m}{I_j}{(I_j~p_1\ldots + p_r)}_{j=1\ldots k}}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\item[ACC-INDP] +\inference{% + 
\frac{ + \WT{E}{}{p}{\sig{\Spec_1;\dots;\Spec_n;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'};\dots}} + }{ + \WTRED{E}{}{p.I_i}{\triangleright_\delta}{p'.I_i} + } +} +\inference{% + \frac{ + \WT{E}{}{p}{\sig{\Spec_1;\dots;\Spec_n;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'};\dots}} + }{ + \WTRED{E}{}{p.c_i}{\triangleright_\delta}{p'.c_i} + } +} +%%%%%%%%%%%%%%%%%%%%%%%%%%% MODULES +\item[ACC-MOD] +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\ModS{m}{T};\dots}} + }{ + \WTEG{p.m}{\subst{T}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\\ +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\ModSEq{m}{T}{p'};\dots}} + }{ + \WTEG{p.m}{\subst{T}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\item[ACC-MODEQ] +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\ModSEq{m}{T}{p'};\dots}} + }{ + \WTEGRED{p.m}{\triangleright_\delta}{\subst{p'}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\item[ACC-MODTYPE] +\inference{% + \frac{ + \WTEG{p}{\sig{\Spec_1;\dots;Spec_i;\ModType{S}{T};\dots}} + }{ + \WTEGRED{p.S}{\triangleright_\delta}{\subst{T}{p.l}{l}_{l \in \lab{Spec_1;\dots;Spec_i}}} + } +} +\end{description} +The function $\lab{}$ is used to calculate the set of label of +the set of specifications. 
It is defined by +$\lab{\Spec_1;\dots;\Spec_n}=\lab{\Spec_1}\cup\dots;\cup\lab{\Spec_n}$ +where $\lab{\Spec}$ is defined as follows: +\begin{itemize} +\item $\lab{\Assum{\Gamma}{c}{U}}=\{c\}$, +\item $\lab{\Def{\Gamma}{c}{t}{U}}=\{c\}$, +\item + $\lab{\Ind{\Gamma}{\Gamma_P}{\Gamma_C}{\Gamma_I}}=\dom{\Gamma_C}\cup\dom{\Gamma_I}$, +\item $\lab{\ModS{m}{T}}=\{m\}$, +\item $\lab{\ModSEq{m}{T}{M}}=\{m\}$, +\item $\lab{\ModType{S}{T}}=\{S\}$ +\end{itemize} +Environment access for modules and module types +\begin{description} +\item[ENV-MOD] +\inference{% + \frac{ + \WF{E;\ModS{m}{T};E'}{\Gamma} + }{ + \WT{E;\ModS{m}{T};E'}{\Gamma}{m}{T} + } +} +\item[] +\inference{% + \frac{ + \WF{E;\ModSEq{m}{T}{p};E'}{\Gamma} + }{ + \WT{E;\ModSEq{m}{T}{p};E'}{\Gamma}{m}{T} + } +} +\item[ENV-MODEQ] +\inference{% + \frac{ + \WF{E;\ModSEq{m}{T}{p};E'}{\Gamma} + }{ + \WTRED{E;\ModSEq{m}{T}{p};E'}{\Gamma}{m}{\triangleright_\delta}{p} + } +} +\item[ENV-MODTYPE] +\inference{% + \frac{ + \WF{E;\ModType{S}{T};E'}{\Gamma} + }{ + \WTRED{E;\ModType{S}{T};E'}{\Gamma}{S}{\triangleright_\delta}{T} + } +} +\item[ENV-INDP] +\inference{% + \frac{ + \WF{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{} + }{ + \WTRED{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{}{I_i}{\triangleright_\delta}{p.I_i} + } +} +\inference{% + \frac{ + \WF{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{} + }{ + \WTRED{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{}{c_i}{\triangleright_\delta}{p.c_i} + } +} +\end{description} +% %%% replaced by \triangle_\delta +% Module path equality is a transitive and reflexive closure of the +% relation generated by ACC-MODEQ and ENV-MODEQ. 
+% \begin{itemize} +% \item []MP-EQ-REFL +% \inference{% +% \frac{ +% \WTEG{p}{T} +% }{ +% \WTEG{p}{p} +% } +% } +% \item []MP-EQ-TRANS +% \inference{% +% \frac{ +% \WTEGRED{p}{=}{p'}~~~~~~\WTEGRED{p'}{=}{p''} +% }{ +% \WTEGRED{p'}{=}{p''} +% } +% } + +% \end{itemize} + + +% $Id: RefMan-modr.tex 8606 2006-02-23 13:58:10Z herbelin $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: + diff --git a/doc/refman/RefMan-oth.tex b/doc/refman/RefMan-oth.tex new file mode 100644 index 00000000..e92cde74 --- /dev/null +++ b/doc/refman/RefMan-oth.tex @@ -0,0 +1,773 @@ +\chapter{Vernacular commands} +\label{Vernacular-commands} +\label{Other-commands} + +\section{Displaying} + +\subsection{\tt Print {\qualid}.}\comindex{Print} +This command displays on the screen informations about the declared or +defined object referred by {\qualid}. + +\begin{ErrMsgs} +\item {\qualid} \errindex{not a defined object} +\end{ErrMsgs} + +\begin{Variants} +\item {\tt Print Term {\qualid}.} +\comindex{Print Term}\\ +This is a synonym to {\tt Print {\qualid}} when {\qualid} denotes a +global constant. + +\item {\tt About {\qualid}.} +\label{About} +\comindex{About}\\ +This displays various informations about the object denoted by {\qualid}: +its kind (module, constant, assumption, inductive, +constructor, abbreviation\ldots), long name, type, implicit +arguments and argument scopes. + +%\item {\tt Print Proof {\qualid}.}\comindex{Print Proof}\\ +%In case \qualid\ denotes an opaque theorem defined in a section, +%it is stored on a special unprintable form and displayed as +%{\tt }. {\tt Print Proof} forces the printable form of \qualid\ +%to be computed and displays it. +\end{Variants} + +\subsection{\tt Print All.}\comindex{Print All} +This command displays informations about the current state of the +environment, including sections and modules. 
+ +\begin{Variants} +\item {\tt Inspect \num.}\comindex{Inspect}\\ +This command displays the {\num} last objects of the current +environment, including sections and modules. +\item {\tt Print Section {\ident}.}\comindex{Print Section}\\ +should correspond to a currently open section, this command +displays the objects defined since the beginning of this section. +% Discontinued +%% \item {\tt Print.}\comindex{Print}\\ +%% This command displays the axioms and variables declarations in the +%% environment as well as the constants defined since the last variable +%% was introduced. +\end{Variants} + +\section{Requests to the environment} + +\subsection{\tt Check {\term}.} +\label{Check} +\comindex{Check} +This command displays the type of {\term}. When called in proof mode, +the term is checked in the local context of the current subgoal. + +\subsection{\tt Eval {\rm\sl convtactic} in {\term}.} +\comindex{Eval} + +This command performs the specified reduction on {\term}, and displays +the resulting term with its type. The term to be reduced may depend on +hypothesis introduced in the first subgoal (if a proof is in +progress). + +\SeeAlso section~\ref{Conversion-tactics}. + +\subsection{\tt Extraction \term.} +\label{ExtractionTerm} +\comindex{Extraction} +This command displays the extracted term from +{\term}. The extraction is processed according to the distinction +between {\Set} and {\Prop}; that is to say, between logical and +computational content (see section \ref{Sorts}). The extracted term is +displayed in Objective Caml syntax, where global identifiers are still +displayed as in \Coq\ terms. + +\begin{Variants} +\item \texttt{Recursive Extraction {\qualid$_1$} \ldots{} {\qualid$_n$}.}\\ + Recursively extracts all the material needed for the extraction of + globals {\qualid$_1$} \ldots{} {\qualid$_n$}. +\end{Variants} + +\SeeAlso chapter~\ref{Extraction}. 
+ +\subsection{\tt Opaque \qualid$_1$ \dots \qualid$_n$.} +\comindex{Opaque}\label{Opaque} This command tells not to unfold the +the constants {\qualid$_1$} \dots {\qualid$_n$} in tactics using +$\delta$-conversion. Unfolding a constant is replacing it by its +definition. {\tt Opaque} can only apply on constants originally +defined as {\tt Transparent}. + +Constants defined by a proof ended by {\tt Qed} are automatically +stamped as {\tt Opaque} and can no longer be considered as {\tt +Transparent}. This is to keep with the usual mathematical practice of +{\em proof irrelevance}: what matters in a mathematical development is +the sequence of lemma statements, not their actual proofs. This +distinguishes lemmas from the usual defined constants, whose actual +values are of course relevant in general. + +\SeeAlso sections \ref{Conversion-tactics}, \ref{Automatizing}, +\ref{Theorem} + +\begin{ErrMsgs} +\item \errindex{The reference \qualid\ was not found in the current +environment}\\ + There is no constant referred by {\qualid} in the environment. + Nevertheless, if you asked \texttt{Opaque foo bar} + and if \texttt{bar} does not exist, \texttt{foo} is set opaque. +\end{ErrMsgs} + +\subsection{\tt Transparent \qualid$_1$ \dots \qualid$_n$.} +\comindex{Transparent}\label{Transparent} +This command is the converse of {\tt Opaque} and can only apply on constants originally defined as {\tt Transparent} to restore their initial behaviour after an {\tt Opaque} command. + +The constants automatically declared transparent are the ones defined by a proof ended by {\tt Defined}, or by a {\tt + Definition} or {\tt Local} with an explicit body. + +\Warning {\tt Transparent} and \texttt{Opaque} are not synchronous +with the reset mechanism. If a constant was transparent at point A, if +you set it opaque at point B and reset to point A, you return to state +of point A with the difference that the constant is still opaque. This +can cause changes in tactic scripts behaviour. 
+ +At section or module closing, a constant recovers the status it got at +the time of its definition. + +%TODO: expliquer le rapport avec les sections + +\begin{ErrMsgs} +% \item \errindex{Can not set transparent.}\\ +% It is a constant from a required module or a parameter. +\item \errindex{The reference \qualid\ was not found in the current +environment}\\ + There is no constant referred by {\qualid} in the environment. +\end{ErrMsgs} + +\SeeAlso sections \ref{Conversion-tactics}, \ref{Automatizing}, +\ref{Theorem} + +\subsection{\tt Search {\qualid}.}\comindex{Search} +This command displays the name and type of all theorems of the current +context whose statement's conclusion has the form {\tt ({\qualid} t1 .. + tn)}. This command is useful to remind the user of the name of +library lemmas. +\begin{ErrMsgs} +\item \errindex{The reference \qualid\ was not found in the current +environment}\\ + There is no constant in the environment named \qualid. +\end{ErrMsgs} + +\begin{Variants} +\item +{\tt Search {\qualid} inside {\module$_1$} \ldots{} {\module$_n$}.} + +This restricts the search to constructions defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\item {\tt Search {\qualid} outside {\module$_1$} \ldots{} {\module$_n$}.} + +This restricts the search to constructions not defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\begin{ErrMsgs} +\item \errindex{Module/section \module{} not found} +No module \module{} has been required (see section~\ref{Require}). +\end{ErrMsgs} + +\end{Variants} + +\subsection{\tt SearchAbout {\qualid}.}\comindex{SearchAbout} +This command displays the name and type of all objects (theorems, +axioms, etc) of the current context whose statement contains \qualid. +This command is useful to remind the user of the name of library +lemmas. + +\begin{ErrMsgs} +\item \errindex{The reference \qualid\ was not found in the current +environment}\\ + There is no constant in the environment named \qualid. 
+\end{ErrMsgs} + +\begin{Variants} +\item {\tt SearchAbout [ \nelist{\textrm{\textsl{qualid-or-string}}}{} +].}\\ +\noindent where {\textrm{\textsl{qualid-or-string}}} is a {\qualid} or +a {\str}. + +This extension of {\tt SearchAbout} searches for all objects whose +statement mentions all of {\qualid} of the list and whose name +contains all {\str} of the list. + +\Example + +\begin{coq_example} +Require Import ZArith. +SearchAbout [ Zmult Zplus "distr" ]. +\end{coq_example} + +\item +\begin{tabular}[t]{@{}l} + {\tt SearchAbout {\term} inside {\module$_1$} \ldots{} {\module$_n$}.} \\ + {\tt SearchAbout [ \nelist{\textrm{\textsl{qualid-or-string}}}{} ] + inside {\module$_1$} \ldots{} {\module$_n$}.} +\end{tabular} + +This restricts the search to constructions defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\item +\begin{tabular}[t]{@{}l} + {\tt SearchAbout {\term} outside {\module$_1$}...{\module$_n$}.} \\ + {\tt SearchAbout [ \nelist{\textrm{\textsl{qualid-or-string}}}{} ] + outside {\module$_1$}...{\module$_n$}.} +\end{tabular} + +This restricts the search to constructions not defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\end{Variants} + +\subsection{\tt SearchPattern {\term}.}\comindex{SearchPattern} + +This command displays the name and type of all theorems of the current +context whose statement's conclusion matches the expression {\term} +where holes in the latter are denoted by ``{\texttt \_}''. + +\begin{coq_example} +Require Import Arith. +SearchPattern (_ + _ = _ + _). +\end{coq_example} + +Patterns need not be linear: you can express that the same expression +must occur in two places by using pattern variables `{\texttt +?{\ident}}''. + +\begin{coq_example} +Require Import Arith. +SearchPattern (?X1 + _ = _ + ?X1). 
+\end{coq_example} + +\begin{Variants} +\item {\tt SearchPattern {\term} inside +{\module$_1$} \ldots{} {\module$_n$}.}\comindex{SearchPattern \ldots{} inside +\ldots{}} + +This restricts the search to constructions defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\item {\tt SearchPattern {\term} outside {\module$_1$} \ldots{} {\module$_n$}.}\comindex{SearchPattern \ldots{} outside \ldots{}} + +This restricts the search to constructions not defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\end{Variants} + +\subsection{\tt SearchRewrite {\term}.}\comindex{SearchRewrite} + +This command displays the name and type of all theorems of the current +context whose statement's conclusion is an equality of which one side matches +the expression {\term =}. Holes in {\term} are denoted by ``{\texttt \_}''. + +\begin{coq_example} +Require Import Arith. +SearchRewrite (_ + _ + _). +\end{coq_example} + +\begin{Variants} +\item {\tt SearchRewrite {\term} inside +{\module$_1$} \ldots{} {\module$_n$}.} + +This restricts the search to constructions defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\item {\tt SearchRewrite {\term} outside {\module$_1$} \ldots{} {\module$_n$}.} + +This restricts the search to constructions not defined in modules +{\module$_1$} \ldots{} {\module$_n$}. + +\end{Variants} + +% \subsection{\tt SearchIsos {\term}.}\comindex{SearchIsos} +% \label{searchisos} +% \texttt{SearchIsos} searches terms by their type modulo isomorphism. 
+% This command displays the full name of all constants, variables, +% inductive types, and inductive constructors of the current +% context whose type is isomorphic to {\term} modulo the contextual part of the +% following axiomatization (the mutual inductive types with one constructor, +% without implicit arguments, and for which projections exist, are regarded as a +% sequence of $\sa{}$): + + +% \begin{tabbing} +% \ \ \ \ \=11.\ \=\kill +% \>1.\>$A=B\mx{ if }A\stackrel{\bt{}\io{}}{\lra{}}B$\\ +% \>2.\>$\sa{}x:A.B=\sa{}y:A.B[x\la{}y]\mx{ if }y\not\in{}FV(\sa{}x:A.B)$\\ +% \>3.\>$\Pi{}x:A.B=\Pi{}y:A.B[x\la{}y]\mx{ if }y\not\in{}FV(\Pi{}x:A.B)$\\ +% \>4.\>$\sa{}x:A.B=\sa{}x:B.A\mx{ if }x\not\in{}FV(A,B)$\\ +% \>5.\>$\sa{}x:(\sa{}y:A.B).C=\sa{}x:A.\sa{}y:B[y\la{}x].C[x\la{}(x,y)]$\\ +% \>6.\>$\Pi{}x:(\sa{}y:A.B).C=\Pi{}x:A.\Pi{}y:B[y\la{}x].C[x\la{}(x,y)]$\\ +% \>7.\>$\Pi{}x:A.\sa{}y:B.C=\sa{}y:(\Pi{}x:A.B).(\Pi{}x:A.C[y\la{}(y\sm{}x)]$\\ +% \>8.\>$\sa{}x:A.unit=A$\\ +% \>9.\>$\sa{}x:unit.A=A[x\la{}tt]$\\ +% \>10.\>$\Pi{}x:A.unit=unit$\\ +% \>11.\>$\Pi{}x:unit.A=A[x\la{}tt]$ +% \end{tabbing} + +% For more informations about the exact working of this command, see +% \cite{Del97}. + +\subsection{\tt Locate {\qualid}.}\comindex{Locate} +\label{Locate} +This command displays the full name of the qualified identifier {\qualid} +and consequently the \Coq\ module in which it is defined. + +\begin{coq_eval} +(*************** The last line should produce **************************) +(*********** Error: I.Dont.Exist not a defined object ******************) +\end{coq_eval} +\begin{coq_eval} +Set Printing Depth 50. +\end{coq_eval} +\begin{coq_example} +Locate nat. +Locate Datatypes.O. +Locate Init.Datatypes.O. +Locate Coq.Init.Datatypes.O. +Locate I.Dont.Exist. +\end{coq_example} + +\SeeAlso Section \ref{LocateSymbol} + +\section{Loading files} + +\Coq\ offers the possibility of loading different +parts of a whole development stored in separate files. 
Their contents +will be loaded as if they were entered from the keyboard. This means +that the loaded files are ASCII files containing sequences of commands +for \Coq's toplevel. This kind of file is called a {\em script} for +\Coq\index{Script file}. The standard (and default) extension of +\Coq's script files is {\tt .v}. + +\subsection{\tt Load {\ident}.} +\comindex{Load}\label{Load} +This command loads the file named {\ident}{\tt .v}, searching +successively in each of the directories specified in the {\em + loadpath}. (see section \ref{loadpath}) + +\begin{Variants} +\item {\tt Load {\str}.}\label{Load-str}\\ + Loads the file denoted by the string {\str}, where {\str} is any + complete filename. Then the \verb.~. and {\tt ..} + abbreviations are allowed as well as shell variables. If no + extension is specified, \Coq\ will use the default extension {\tt + .v} +\item {\tt Load Verbose {\ident}.}, + {\tt Load Verbose {\str}}\\ + \comindex{Load Verbose} + Display, while loading, the answers of \Coq\ to each command + (including tactics) contained in the loaded file + \SeeAlso section \ref{Begin-Silent} +\end{Variants} + +\begin{ErrMsgs} +\item \errindex{Can't find file {\ident} on loadpath} +\end{ErrMsgs} + +\section{Compiled files}\label{compiled}\index{Compiled files} + +This feature allows to build files for a quick loading. When loaded, +the commands contained in a compiled file will not be {\em replayed}. +In particular, proofs will not be replayed. This avoids a useless +waste of time. + +\Rem A module containing an opened section cannot be compiled. + +% \subsection{\tt Compile Module {\ident}.} +% \index{Modules} +% \comindex{Compile Module} +% \index{.vo files} +% This command loads the file +% {\ident}{\tt .v} and plays the script it contains. Declarations, +% definitions and proofs it contains are {\em "packaged"} in a compiled +% form: the {\em module} named {\ident}. +% A file {\ident}{\tt .vo} is then created. 
+% The file {\ident}{\tt .v} is searched according to the +% current loadpath. +% The {\ident}{\tt .vo} is then written in the directory where +% {\ident}{\tt .v} was found. + +% \begin{Variants} +% \item \texttt{Compile Module {\ident} {\str}.}\\ +% Uses the file {\str}{\tt .v} or {\str} if the previous one does not +% exist to build the module {\ident}. In this case, {\str} is any +% string giving a filename in the UNIX sense (see section +% \ref{Load-str}). +% \Warning The given filename can not contain other caracters than +% the caracters of \Coq's identifiers : letters or digits or the +% underscore symbol ``\_''. + +% \item \texttt{Compile Module Specification {\ident}.}\\ +% \comindex{Compile Module Specification} +% Builds a specification module: only the types of terms are stored +% in the module. The bodies (the proofs) are {\em not} written +% in the module. In that case, the file created is {\ident}{\tt .vi}. +% This is only useful when proof terms take too much place in memory +% and are not necessary. + +% \item \texttt{Compile Verbose Module {\ident}.}\\ +% \comindex{Compile Verbose Module} +% Verbose version of Compile: shows the contents of the file being +% compiled. +% \end{Variants} + +% These different variants can be combined. + + +% \begin{ErrMsgs} +% \item \texttt{You cannot open a module when there are things other than}\\ +% \texttt{Modules and Imports in the context.}\\ +% The only commands allowed before a {Compile Module} command are {\tt +% Require},\\ +% {\tt Read Module} and {\tt Import}. Actually, The normal way to +% compile modules is by the {\tt coqc} command (see chapter +% \ref{Addoc-coqc}). 
+% \end{ErrMsgs} + +% \SeeAlso sections \ref{Opaque}, \ref{loadpath}, chapter +% \ref{Addoc-coqc} + +%\subsection{\tt Import {\qualid}.}\comindex{Import} +%\label{Import} + +%%%%%%%%%%%% +% Import and Export described in RefMan-mod.tex +% the minor difference (to avoid multiple Exporting of libraries) in +% the treatment of normal modules and libraries by Export omitted + + +\subsection{\tt Require {\dirpath}.} +\label{Require} +\comindex{Require} + +This command looks in the loadpath for a file containing module +{\dirpath}, then loads and opens (imports) its contents. +More precisely, if {\dirpath} splits into a library dirpath {\dirpath'} and a module name {\textsl{ident}}, then the file {\ident}{\tt .vo} is searched in a physical path mapped to the logical path {\dirpath'}. + +TODO: effect on the name table. + +% The implementation file ({\ident}{\tt .vo}) is searched first, +% then the specification file ({\ident}{\tt .vi}) in case of failure. +If the module required has already been loaded, \Coq\ +simply opens it (as {\tt Import {\dirpath}} would do it). +%If the module required is already loaded and open, \Coq\ +%displays the following warning: {\tt {\ident} already imported}. + +If a module {\it A} contains a command {\tt Require} {\it B} then the +command {\tt Require} {\it A} loads the module {\it B} but does not +open it (See the {\tt Require Export} variant below). + +\begin{Variants} +\item {\tt Require Export {\qualid}.}\\ + \comindex{Require Export} + This command acts as {\tt Require} {\qualid}. But if a module {\it + A} contains a command {\tt Require Export} {\it B}, then the + command {\tt Require} {\it A} opens the module {\it B} as if the + user would have typed {\tt Require}{\it B}. 
+% \item {\tt Require $[$ Implementation $|$ Specification $]$ {\qualid}.}\\ +% \comindex{Require Implementation} +% \comindex{Require Specification} +% Is the same as {\tt Require}, but specifying explicitly the +% implementation ({\tt.vo} file) or the specification ({\tt.vi} +% file). + +% Redundant ? +% \item {\tt Require {\qualid} {\str}.}\\ +% Specifies the file to load as being {\str} but containing module +% {\qualid}. +% The opened module is still {\ident} and therefore must have been loaded. +\item {\tt Require {\qualid} {\str}.}\\ + Specifies the file to load as being {\str} but containing module + {\qualid} which is then opened. +\end{Variants} + +These different variants can be combined. + +\begin{ErrMsgs} + +\item \errindex{Cannot load {\ident}: no physical path bound to {\dirpath}} + +\item \errindex{Can't find module toto on loadpath} + + The command did not find the file {\tt toto.vo}. Either {\tt + toto.v} exists but is not compiled or {\tt toto.vo} is in a directory + which is not in your {\tt LoadPath} (see section \ref{loadpath}). + +\item \errindex{Bad magic number} + + \index{Bad-magic-number@{\tt Bad Magic Number}} + The file {\tt{\ident}.vo} was found but either it is not a \Coq\ + compiled module, or it was compiled with an older and incompatible + version of \Coq. +\end{ErrMsgs} + +\SeeAlso chapter \ref{Addoc-coqc} + +\subsection{\tt Print Modules.} +\comindex{Print Modules} +This command shows the currently loaded and currently opened +(imported) modules. + +\subsection{\tt Declare ML Module {\str$_1$} .. {\str$_n$}.} +\comindex{Declare ML Module} +This commands loads the Objective Caml compiled files {\str$_1$} \dots +{\str$_n$} (dynamic link). It is mainly used to load tactics +dynamically. +% (see chapter \ref{WritingTactics}). + The files are +searched into the current Objective Caml loadpath (see the command {\tt +Add ML Path} in the section \ref{loadpath}). 
Loading of Objective Caml
+files is only possible under the bytecode version of {\tt coqtop}
+(i.e. {\tt coqtop} called with options {\tt -byte}, see chapter
+\ref{Addoc-coqc}).
+
+\begin{ErrMsgs}
+\item \errindex{File not found on loadpath : \str}
+\item \errindex{Loading of ML object file forbidden in a native Coq}
+\end{ErrMsgs}
+
+\subsection{\tt Print ML Modules.}\comindex{Print ML Modules}
+This prints the names of all \ocaml{} modules loaded with \texttt{Declare
+  ML Module}. To know from where these modules were loaded, the user
+should use the command \texttt{Locate File} (see page \pageref{Locate File}).
+
+\section{Loadpath}
+\label{loadpath}\index{Loadpath}
+
+There are currently two loadpaths in \Coq. A loadpath for seeking
+{\Coq} files (extensions {\tt .v} or {\tt .vo} or {\tt .vi}) and one for
+seeking Objective Caml files. The default loadpath contains the
+directory ``\texttt{.}'' denoting the current directory and mapped to the empty logical path (see section \ref{LongNames}).
+
+\subsection{\tt Pwd.}\comindex{Pwd}\label{Pwd}
+This command displays the current working directory.
+
+\subsection{\tt Cd {\str}.}\comindex{Cd}
+This command changes the current directory according to {\str}
+which can be any valid path.
+
+\begin{Variants}
+\item {\tt Cd.}\\
+  Is equivalent to {\tt Pwd.}
+\end{Variants}
+
+\subsection{\tt Add LoadPath {\str} as {\dirpath}.}
+\comindex{Add LoadPath}\label{AddLoadPath}
+
+This command adds the path {\str} to the current {\Coq} loadpath and
+maps it to the logical directory {\dirpath}, which means that every
+file {\tt M.v} physically lying in directory {\str} becomes accessible
+through logical name ``{\dirpath}{\tt{.M}}''.
+
+\Rem {\tt Add LoadPath} also adds {\str} to the current ML loadpath.
+
+\begin{Variants}
+\item {\tt Add LoadPath {\str}.}\\
+Performs as {\tt Add LoadPath {\str} as {\dirpath}} but for the empty directory path.
+\end{Variants} + +\subsection{\tt Add Rec LoadPath {\str} as {\dirpath}.}\comindex{Add Rec LoadPath}\label{AddRecLoadPath} +This command adds the directory {\str} and all its subdirectories +to the current \Coq\ loadpath. The top directory {\str} is mapped to the logical directory {\dirpath} while any subdirectory {\textsl{pdir}} is mapped to logical directory {\dirpath}{\tt{.pdir}} and so on. + +\Rem {\tt Add Rec LoadPath} also recursively adds {\str} to the current ML loadpath. + +\begin{Variants} +\item {\tt Add Rec LoadPath {\str}.}\\ +Works as {\tt Add Rec LoadPath {\str} as {\dirpath}} but for the empty logical directory path. +\end{Variants} + +\subsection{\tt Remove LoadPath {\str}.}\comindex{Remove LoadPath} +This command removes the path {\str} from the current \Coq\ loadpath. + +\subsection{\tt Print LoadPath.}\comindex{Print LoadPath} +This command displays the current \Coq\ loadpath. + +\subsection{\tt Add ML Path {\str}.}\comindex{Add ML Path} +This command adds the path {\str} to the current Objective Caml loadpath (see +the command {\tt Declare ML Module} in the section \ref{compiled}). + +\Rem This command is implied by {\tt Add LoadPath {\str} as {\dirpath}}. + +\subsection{\tt Add Rec ML Path {\str}.}\comindex{Add Rec ML Path} +This command adds the directory {\str} and all its subdirectories +to the current Objective Caml loadpath (see +the command {\tt Declare ML Module} in the section \ref{compiled}). + +\Rem This command is implied by {\tt Add Rec LoadPath {\str} as {\dirpath}}. + +\subsection{\tt Print ML Path {\str}.}\comindex{Print ML Path} +This command displays the current Objective Caml loadpath. +This command makes sense only under the bytecode version of {\tt +coqtop}, i.e. using option {\tt -byte} (see the +command {\tt Declare ML Module} in the section +\ref{compiled}). + +\subsection{\tt Locate File {\str}.}\comindex{Locate + File}\label{Locate File} +This command displays the location of file {\str} in the current loadpath. 
+Typically, {\str} is a \texttt{.cmo} or \texttt{.vo} or \texttt{.v} file.
+
+\subsection{\tt Locate Library {\dirpath}.}
+\comindex{Locate Library}
+This command gives the status of the \Coq\ module {\dirpath}. It tells if the
+module is loaded and if not searches in the load path for a module
+of logical name {\dirpath}.
+
+\section{States and Reset}
+
+\subsection{\tt Reset \ident.}
+\comindex{Reset}
+This command removes all the objects in the environment since \ident\
+was introduced, including \ident. \ident\ may be the name of a defined
+or declared object as well as the name of a section. One cannot reset
+over the name of a module or of an object inside a module.
+
+\begin{ErrMsgs}
+\item \ident: \errindex{no such entry}
+\end{ErrMsgs}
+
+\subsection{\tt Back.}
+\comindex{Back}
+
+This command undoes all the effects of the last vernacular
+command. This does not include commands that only access the
+environment like those described in the previous sections of this
+chapter (for instance {\tt Require} and {\tt Load} can be undone, but
+not {\tt Check} and {\tt Locate}). Commands read from a vernacular
+file are considered as a single command.
+
+\begin{Variants}
+\item {\tt Back $n$} \\
+  Undoes $n$ vernacular commands.
+\end{Variants}
+
+\begin{ErrMsgs}
+\item \errindex{Reached begin of command history} \\
+  Happens when there is no vernacular command to undo.
+\end{ErrMsgs}
+
+\subsection{\tt Restore State \str.}
+\comindex{Restore State}
+  Restores the state contained in the file \str.
+
+\begin{Variants}
+\item {\tt Restore State \ident}\\
+  Equivalent to {\tt Restore State "}{\ident}{\tt .coq"}.
+\item {\tt Reset Initial.}\comindex{Reset Initial}\\
+  Goes back to the initial state (like after the command {\tt coqtop},
+  when the interactive session began). This command is only available
+  interactively.
+\end{Variants}
+
+\subsection{\tt Write State \str.}
+\comindex{Write State}
+Writes the current state into a file \str{} for
+use in a further session. This file can be given as the {\tt
+  inputstate} argument of the commands {\tt coqtop} and {\tt coqc}.
+
+\begin{Variants}
+\item {\tt Write State \ident}\\
+  Equivalent to {\tt Write State "}{\ident}{\tt .coq"}.
+  The state is saved in the current directory (see \pageref{Pwd}).
+\end{Variants}
+
+\section{Quitting and debugging}
+
+\subsection{\tt Quit.}\comindex{Quit}
+This command permits to quit \Coq.
+
+\subsection{\tt Drop.}\comindex{Drop}\label{Drop}
+
+This is used mostly as a debug facility by \Coq's implementors
+and does not concern the casual user.
+This command permits to leave {\Coq} temporarily and enter the
+Objective Caml toplevel. The Objective Caml command:
+
+\begin{flushleft}
+\begin{verbatim}
+#use "include";;
+\end{verbatim}
+\end{flushleft}
+
+\noindent adds the right loadpaths and loads some toplevel printers for
+all abstract types of \Coq - section\_path, identifiers, terms, judgements,
+\dots. You can also use the file \texttt{base\_include} instead,
+that loads only the pretty-printers for section\_paths and
+identifiers.
+% See section \ref{test-and-debug} more information on the
+% usage of the toplevel.
+You can return back to \Coq{} with the command:
+
+\begin{flushleft}
+\begin{verbatim}
+go();;
+\end{verbatim}
+\end{flushleft}
+
+\begin{Warnings}
+\item It only works with the bytecode version of {\Coq} (i.e. {\tt coqtop} called with option {\tt -byte}, see page \pageref{binary-images}).
+\item You must have compiled {\Coq} from the source package and set the
+  environment variable \texttt{COQTOP} to the root of your copy of the sources (see section \ref{EnvVariables}).
+\end{Warnings}
+
+\subsection{\tt Time \textrm{\textsl{command}}.}\comindex{Time}
+\label{time}
+This command executes the vernac command \textrm{\textsl{command}}
+and displays the time needed to execute it.
+ +\section{Controlling display} + +\subsection{\tt Set Silent.} +\comindex{Begin Silent} +\label{Begin-Silent} +\index{Silent mode} +This command turns off the normal displaying. + +\subsection{\tt Unset Silent.}\comindex{End Silent} +This command turns the normal display on. + +\subsection{\tt Set Printing Width {\integer}.}\comindex{Set Printing Width} +This command sets which left-aligned part of the width of the screen +is used for display. + +\subsection{\tt Unset Printing Width.}\comindex{Unset Printing Width} +This command resets the width of the screen used for display to its +default value (which is 78 at the time of writing this documentation). + +\subsection{\tt Test Printing Width.}\comindex{Test Printing Width} +This command displays the current screen width used for display. + +\subsection{\tt Set Printing Depth {\integer}.}\comindex{Set Printing Depth} +This command sets the nesting depth of the formatter used for +pretty-printing. Beyond this depth, display of subterms is replaced by +dots. + +\subsection{\tt Unset Printing Depth.}\comindex{Unset Printing Depth} +This command resets the nesting depth of the formatter used for +pretty-printing to its default value (at the +time of writing this documentation, the default value is 50). + +\subsection{\tt Test Printing Depth.}\comindex{Test Printing Depth} +This command displays the current nesting depth used for display. + +%\subsection{\tt Explain ...} +%Not yet documented. + +%\subsection{\tt Go ...} +%Not yet documented. + +%\subsection{\tt Abstraction ...} +%Not yet documented. 
+ +% $Id: RefMan-oth.tex 8606 2006-02-23 13:58:10Z herbelin $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-pre.tex b/doc/refman/RefMan-pre.tex new file mode 100644 index 00000000..f8a9622c --- /dev/null +++ b/doc/refman/RefMan-pre.tex @@ -0,0 +1,582 @@ +\setheaders{Credits} +\chapter*{Credits} +%\addcontentsline{toc}{section}{Credits} + +\Coq{}~ is a proof assistant for higher-order logic, allowing the +development of computer programs consistent with their formal +specification. It is the result of about ten years of research of the +Coq project. We shall briefly survey here three main aspects: the +\emph{logical language} in which we write our axiomatizations and +specifications, the \emph{proof assistant} which allows the development +of verified mathematical proofs, and the \emph{program extractor} which +synthesizes computer programs obeying their formal specifications, +written as logical assertions in the language. + +The logical language used by {\Coq} is a variety of type theory, +called the \emph{Calculus of Inductive Constructions}. Without going +back to Leibniz and Boole, we can date the creation of what is now +called mathematical logic to the work of Frege and Peano at the turn +of the century. The discovery of antinomies in the free use of +predicates or comprehension principles prompted Russell to restrict +predicate calculus with a stratification of \emph{types}. This effort +culminated with \emph{Principia Mathematica}, the first systematic +attempt at a formal foundation of mathematics. A simplification of +this system along the lines of simply typed $\lambda$-calculus +occurred with Church's \emph{Simple Theory of Types}. The +$\lambda$-calculus notation, originally used for expressing +functionality, could also be used as an encoding of natural deduction +proofs. This Curry-Howard isomorphism was used by N. 
de Bruijn in the +\emph{Automath} project, the first full-scale attempt to develop and +mechanically verify mathematical proofs. This effort culminated with +Jutting's verification of Landau's \emph{Grundlagen} in the 1970's. +Exploiting this Curry-Howard isomorphism, notable achievements in +proof theory saw the emergence of two type-theoretic frameworks; the +first one, Martin-L\"of's \emph{Intuitionistic Theory of Types}, +attempts a new foundation of mathematics on constructive principles. +The second one, Girard's polymorphic $\lambda$-calculus $F_\omega$, is +a very strong functional system in which we may represent higher-order +logic proof structures. Combining both systems in a higher-order +extension of the Automath languages, T. Coquand presented in 1985 the +first version of the \emph{Calculus of Constructions}, CoC. This strong +logical system allowed powerful axiomatizations, but direct inductive +definitions were not possible, and inductive notions had to be defined +indirectly through functional encodings, which introduced +inefficiencies and awkwardness. The formalism was extended in 1989 by +T. Coquand and C. Paulin with primitive inductive definitions, leading +to the current \emph{Calculus of Inductive Constructions}. This +extended formalism is not rigorously defined here. Rather, numerous +concrete examples are discussed. We refer the interested reader to +relevant research papers for more information about the formalism, its +meta-theoretic properties, and semantics. However, it should not be +necessary to understand this theoretical material in order to write +specifications. It is possible to understand the Calculus of Inductive +Constructions at a higher level, as a mixture of predicate calculus, +inductive predicate definitions presented as typed PROLOG, and +recursive function definitions close to the language ML. + +Automated theorem-proving was pioneered in the 1960's by Davis and +Putnam in propositional calculus. 
A complete mechanization (in the +sense of a semi-decision procedure) of classical first-order logic was +proposed in 1965 by J.A. Robinson, with a single uniform inference +rule called \emph{resolution}. Resolution relies on solving equations +in free algebras (i.e. term structures), using the \emph{unification + algorithm}. Many refinements of resolution were studied in the +1970's, but few convincing implementations were realized, except of +course that PROLOG is in some sense issued from this effort. A less +ambitious approach to proof development is computer-aided +proof-checking. The most notable proof-checkers developed in the +1970's were LCF, designed by R. Milner and his colleagues at U. +Edinburgh, specialized in proving properties about denotational +semantics recursion equations, and the Boyer and Moore theorem-prover, +an automation of primitive recursion over inductive data types. While +the Boyer-Moore theorem-prover attempted to synthesize proofs by a +combination of automated methods, LCF constructed its proofs through +the programming of \emph{tactics}, written in a high-level functional +meta-language, ML. + +The salient feature which clearly distinguishes our proof assistant +from say LCF or Boyer and Moore's, is its possibility to extract +programs from the constructive contents of proofs. This computational +interpretation of proof objects, in the tradition of Bishop's +constructive mathematics, is based on a realizability interpretation, +in the sense of Kleene, due to C. Paulin. The user must just mark his +intention by separating in the logical statements the assertions +stating the existence of a computational object from the logical +assertions which specify its properties, but which may be considered +as just comments in the corresponding program. Given this information, +the system automatically extracts a functional term from a consistency +proof of its specifications. 
This functional term may be in turn +compiled into an actual computer program. This methodology of +extracting programs from proofs is a revolutionary paradigm for +software engineering. Program synthesis has long been a theme of +research in artificial intelligence, pioneered by R. Waldinger. The +Tablog system of Z. Manna and R. Waldinger allows the deductive +synthesis of functional programs from proofs in tableau form of their +specifications, written in a variety of first-order logic. Development +of a systematic \emph{programming logic}, based on extensions of +Martin-L\"of's type theory, was undertaken at Cornell U. by the Nuprl +team, headed by R. Constable. The first actual program extractor, PX, +was designed and implemented around 1985 by S. Hayashi from Kyoto +University. It allows the extraction of a LISP program from a proof +in a logical system inspired by the logical formalisms of S. Feferman. +Interest in this methodology is growing in the theoretical computer +science community. We can foresee the day when actual computer systems +used in applications will contain certified modules, automatically +generated from a consistency proof of their formal specifications. We +are however still far from being able to use this methodology in a +smooth interaction with the standard tools from software engineering, +i.e. compilers, linkers, run-time systems taking advantage of special +hardware, debuggers, and the like. We hope that {\Coq} can be of use +to researchers interested in experimenting with this new methodology. + +A first implementation of CoC was started in 1984 by G. Huet and T. +Coquand. Its implementation language was CAML, a functional +programming language from the ML family designed at INRIA in +Rocquencourt. The core of this system was a proof-checker for CoC seen +as a typed $\lambda$-calculus, called the \emph{Constructive Engine}. 
+This engine was operated through a high-level notation permitting the +declaration of axioms and parameters, the definition of mathematical +types and objects, and the explicit construction of proof objects +encoded as $\lambda$-terms. A section mechanism, designed and +implemented by G. Dowek, allowed hierarchical developments of +mathematical theories. This high-level language was called the +\emph{Mathematical Vernacular}. Furthermore, an interactive +\emph{Theorem Prover} permitted the incremental construction of proof +trees in a top-down manner, subgoaling recursively and backtracking +from dead-alleys. The theorem prover executed tactics written in CAML, +in the LCF fashion. A basic set of tactics was predefined, which the +user could extend by his own specific tactics. This system (Version +4.10) was released in 1989. Then, the system was extended to deal +with the new calculus with inductive types by C. Paulin, with +corresponding new tactics for proofs by induction. A new standard set +of tactics was streamlined, and the vernacular extended for tactics +execution. A package to compile programs extracted from proofs to +actual computer programs in CAML or some other functional language was +designed and implemented by B. Werner. A new user-interface, relying +on a CAML-X interface by D. de Rauglaudre, was designed and +implemented by A. Felty. It allowed operation of the theorem-prover +through the manipulation of windows, menus, mouse-sensitive buttons, +and other widgets. This system (Version 5.6) was released in 1991. + +\Coq{} was ported to the new implementation Caml-light of X. Leroy and +D. Doligez by D. de Rauglaudre (Version 5.7) in 1992. A new version +of \Coq{} was then coordinated by C. Murthy, with new tools designed +by C. Parent to prove properties of ML programs (this methodology is +dual to program extraction) and a new user-interaction loop. This +system (Version 5.8) was released in May 1993. 
A Centaur interface +\textsc{CTCoq} was then developed by Y. Bertot from the Croap project +from INRIA-Sophia-Antipolis. + +In parallel, G. Dowek and H. Herbelin developed a new proof engine, +allowing the general manipulation of existential variables +consistently with dependent types in an experimental version of \Coq{} +(V5.9). + +The version V5.10 of \Coq{} is based on a generic system for +manipulating terms with binding operators due to Chet Murthy. A new +proof engine allows the parallel development of partial proofs for +independent subgoals. The structure of these proof trees is a mixed +representation of derivation trees for the Calculus of Inductive +Constructions with abstract syntax trees for the tactics scripts, +allowing the navigation in a proof at various levels of details. The +proof engine allows generic environment items managed in an +object-oriented way. This new architecture, due to C. Murthy, +supports several new facilities which make the system easier to extend +and to scale up: + +\begin{itemize} +\item User-programmable tactics are allowed +\item It is possible to separately verify development modules, and to + load their compiled images without verifying them again - a quick + relocation process allows their fast loading +\item A generic parsing scheme allows user-definable notations, with a + symmetric table-driven pretty-printer +\item Syntactic definitions allow convenient abbreviations +\item A limited facility of meta-variables allows the automatic + synthesis of certain type expressions, allowing generic notations + for e.g. equality, pairing, and existential quantification. +\end{itemize} + +In the Fall of 1994, C. Paulin-Mohring replaced the structure of +inductively defined types and families by a new structure, allowing +the mutually recursive definitions. P. 
Manoury implemented a +translation of recursive definitions into the primitive recursive +style imposed by the internal recursion operators, in the style of the +ProPre system. C. Mu{\~n}oz implemented a decision procedure for +intuitionistic propositional logic, based on results of R. Dyckhoff. +J.C. Filli{\^a}tre implemented a decision procedure for first-order +logic without contraction, based on results of J. Ketonen and R. +Weyhrauch. Finally C. Murthy implemented a library of inversion +tactics, relieving the user from tedious definitions of ``inversion +predicates''. + +\begin{flushright} +Rocquencourt, Feb. 1st 1995\\ +Gérard Huet +\end{flushright} + +\section*{Credits: addendum for version 6.1} +%\addcontentsline{toc}{section}{Credits: addendum for version V6.1} + +The present version 6.1 of \Coq{} is based on the V5.10 architecture. It +was ported to the new language Objective Caml by Bruno Barras. The +underlying framework has slightly changed and allows more conversions +between sorts. + +The new version provides powerful tools for easier developments. + +Cristina Cornes designed an extension of the \Coq{} syntax to allow +definition of terms using a powerful pattern-matching analysis in the +style of ML programs. + +Amokrane Saïbi wrote a mechanism to simulate +inheritance between types families extending a proposal by Peter +Aczel. He also developed a mechanism to automatically compute which +arguments of a constant may be inferred by the system and consequently +do not need to be explicitly written. + +Yann Coscoy designed a command which explains a proof term using +natural language. Pierre Cr{\'e}gut built a new tactic which solves +problems in quantifier-free Presburger Arithmetic. Both +functionalities have been integrated to the \Coq{} system by Hugo +Herbelin. + +Samuel Boutin designed a tactic for simplification of commutative +rings using a canonical set of rewriting rules and equality modulo +associativity and commutativity. 
+ +Finally the organisation of the \Coq{} distribution has been supervised +by Jean-Christophe Filliâtre with the help of Judicaël Courant +and Bruno Barras. + +\begin{flushright} +Lyon, Nov. 18th 1996\\ +Christine Paulin +\end{flushright} + +\section*{Credits: addendum for version 6.2} +%\addcontentsline{toc}{section}{Credits: addendum for version V6.2} + +In version 6.2 of \Coq{}, the parsing is done using camlp4, a +preprocessor and pretty-printer for CAML designed by Daniel de +Rauglaudre at INRIA. Daniel de Rauglaudre made the first adaptation +of \Coq{} for camlp4, this work was continued by Bruno Barras who also +changed the structure of \Coq{} abstract syntax trees and the primitives +to manipulate them. The result of +these changes is a faster parsing procedure with greatly improved +syntax-error messages. The user-interface to introduce grammar or +pretty-printing rules has also changed. + +Eduardo Giménez redesigned the internal +tactic libraries, giving uniform names +to Caml functions corresponding to \Coq{} tactic names. + +Bruno Barras wrote new more efficient reductions functions. + +Hugo Herbelin introduced more uniform notations in the \Coq{} +specification language: the definitions by fixpoints and +pattern-matching have a more readable syntax. Patrick Loiseleur +introduced user-friendly notations for arithmetic expressions. + +New tactics were introduced: Eduardo Giménez improved a mechanism to +introduce macros for tactics, and designed special tactics for +(co)inductive definitions; Patrick Loiseleur designed a tactic to +simplify polynomial expressions in an arbitrary commutative ring which +generalizes the previous tactic implemented by Samuel Boutin. +Jean-Christophe Filli\^atre introduced a tactic for refining a goal, +using a proof term with holes as a proof scheme. + +David Delahaye designed the \textsf{SearchIsos} tool to search an +object in the library given its type (up to isomorphism). 
+
+Henri Laulhère produced the \Coq{} distribution for the Windows environment.
+
+Finally, Hugo Herbelin was the main coordinator of the \Coq{}
+documentation with principal contributions by Bruno Barras, David Delahaye,
+Jean-Christophe Filli\^atre, Eduardo
+Giménez, Hugo Herbelin and Patrick Loiseleur.
+
+\begin{flushright}
+Orsay, May 4th 1998\\
+Christine Paulin
+\end{flushright}
+
+\section*{Credits: addendum for version 6.3}
+The main changes in version V6.3 were the introduction of a few new tactics
+and the extension of the guard condition for fixpoint definitions.
+
+
+B. Barras extended the unification algorithm to complete partial terms
+and solved various tricky bugs related to universes.\\
+D. Delahaye developed the \texttt{AutoRewrite} tactic. He also designed the new
+behavior of \texttt{Intro} and provided the tacticals \texttt{First} and
+\texttt{Solve}.\\
+J.-C. Filli\^atre developed the \texttt{Correctness} tactic.\\
+E. Gim\'enez extended the guard condition in fixpoints.\\
+H. Herbelin designed the new syntax for definitions and extended the
+\texttt{Induction} tactic.\\
+P. Loiseleur developed the \texttt{Quote} tactic and
+the new design of the \texttt{Auto}
+tactic, he also introduced the index of
+errors in the documentation.\\
+C. Paulin wrote the \texttt{Focus} command and introduced
+the reduction functions in definitions, this last feature
+was proposed by J.-F. Monin from CNET Lannion.
+
+\begin{flushright}
+Orsay, Dec. 1999\\
+Christine Paulin
+\end{flushright}
+
+%\newpage
+
+\section*{Credits: versions 7}
+
+The version V7 is a new implementation started in September 1999 by
+Jean-Christophe Filliâtre. This is a major revision with respect to
+the internal architecture of the system. The \Coq{} version 7.0 was
+distributed in March 2001, version 7.1 in September 2001, version
+7.2 in January 2002, version 7.3 in May 2002 and version 7.4 in
+February 2003.
+ +Jean-Christophe Filliâtre designed the architecture of the new system, he +introduced a new representation for environments and wrote a new kernel +for type-checking terms. His approach was to use functional +data-structures in order to get more sharing, to prepare the addition +of modules and also to get closer to a certified kernel. + +Hugo Herbelin introduced a new structure of terms with local +definitions. He introduced ``qualified'' names, wrote a new +pattern-matching compilation algorithm and designed a more compact +algorithm for checking the logical consistency of universes. He +contributed to the simplification of {\Coq} internal structures and the +optimisation of the system. He added basic tactics for forward +reasoning and coercions in patterns. + +David Delahaye introduced a new language for tactics. General tactics +using pattern-matching on goals and context can directly be written +from the {\Coq} toplevel. He also provided primitives for the design +of user-defined tactics in \textsc{Caml}. + +Micaela Mayero contributed the library on real numbers. +Olivier Desmettre extended this library with axiomatic +trigonometric functions, square, square roots, finite sums, Chasles +property and basic plane geometry. + +Jean-Christophe Filliâtre and Pierre Letouzey redesigned a new +extraction procedure from \Coq{} terms to \textsc{Caml} or +\textsc{Haskell} programs. This new +extraction procedure, unlike the one implemented in previous version +of \Coq{} is able to handle all terms in the Calculus of Inductive +Constructions, even involving universes and strong elimination. P. +Letouzey adapted user contributions to extract ML programs when it was +sensible. +Jean-Christophe Filliâtre wrote \verb=coqdoc=, a documentation +tool for {\Coq} libraries usable from version 7.2. + +Bruno Barras improved the reduction algorithms efficiency and +the confidence level in the correctness of {\Coq} critical type-checking +algorithm. 
+ +Yves Bertot designed the \texttt{SearchPattern} and +\texttt{SearchRewrite} tools and the support for the \textsc{pcoq} interface +(\url{http://www-sop.inria.fr/lemme/pcoq/}). + +Micaela Mayero and David Delahaye introduced {\tt Field}, a decision tactic for commutative fields. + +Christine Paulin changed the elimination rules for empty and singleton +propositional inductive types. + +Loïc Pottier developed {\tt Fourier}, a tactic solving linear inequalities on real numbers. + +Pierre Crégut developed a new version based on reflexion of the {\tt Omega} +decision tactic. + +Claudio Sacerdoti Coen designed an XML output for the {\Coq} +modules to be used in the Hypertextual Electronic Library of +Mathematics (HELM cf \url{http://www.cs.unibo.it/helm}). + +A library for efficient representation of finite maps using binary trees +contributed by Jean Goubault was integrated in the basic theories. + +Jacek Chrz\k{a}szcz designed and implemented the module system of +{\Coq} whose foundations are in Judicaël Courant's PhD thesis. + +\bigskip + +The development was coordinated by C. Paulin. + +Many discussions within the Démons team and the LogiCal project +influenced significantly the design of {\Coq} especially with +%J. Chrz\k{a}szcz, +J. Courant, P. Courtieu, J. Duprat, J. Goubault, A. Miquel, +C. Marché, B. Monate and B. Werner. + +Intensive users suggested improvements of the system : +Y. Bertot, L. Pottier, L. Théry , P. Zimmerman from INRIA, +C. Alvarado, P. Crégut, J.-F. Monin from France Telecom R \& D. +\begin{flushright} +Orsay, May. 2002\\ +Hugo Herbelin \& Christine Paulin +\end{flushright} + +\section*{Credits: version 8.0} + +{\Coq} version 8 is a major revision of the {\Coq} proof assistant. +First, the underlying logic is slightly different. The so-called {\em +impredicativity} of the sort {\tt Set} has been dropped. 
The main
+reason is that it is inconsistent with the principle of description
+which is quite a useful principle for formalizing %classical
+mathematics within classical logic. Moreover, even in a constructive
+setting, the impredicativity of {\tt Set} does not add so much in
+practice and is even subject of criticism from a large part of the
+intuitionistic mathematician community. Nevertheless, the
+impredicativity of {\tt Set} remains optional for users interested in
+investigating mathematical developments which rely on it.
+
+Secondly, the concrete syntax of terms has been completely
+revised. The main motivations were
+
+\begin{itemize}
+\item a more uniform, purified style: all constructions are now lowercase,
+  with a functional programming perfume (e.g. abstraction is now
+  written {\tt fun}), and more directly accessible to the novice
+  (e.g. dependent product is now written {\tt forall} and allows
+  omission of types). Also, parentheses are no longer mandatory
+  for function application.
+\item extensibility: some standard notations (e.g. ``<'' and ``>'') were
+  incompatible with the previous syntax. Now all standard arithmetic
+  notations (=, +, *, /, <, <=, ... and more) are directly part of the
+  syntax.
+\end{itemize}
+
+Together with the revision of the concrete syntax, a new mechanism of
+{\em interpretation scopes} permits to reuse the same symbols
+(typically +, -, *, /, <, <=) in various mathematical theories without
+any ambiguities for {\Coq}, leading to a largely improved readability of
+{\Coq} scripts. New commands to easily add new symbols are also
+provided.
+
+Coming with the new syntax of terms, a slight reform of the tactic
+language and of the language of commands has been carried out. The
+purpose here is a better uniformity making the tactics and commands
+easier to use and to remember.
+
+Thirdly, a restructuration and uniformisation of the standard library
+of {\Coq} has been performed.
There is now just one Leibniz' equality +usable for all the different kinds of {\Coq} objects. Also, the set of +real numbers now lies at the same level as the sets of natural and +integer numbers. Finally, the names of the standard properties of +numbers now follow a standard pattern and the symbolic +notations for the standard definitions as well. + +The fourth point is the release of \CoqIDE{}, a new graphical +gtk2-based interface fully integrated to {\Coq}. Close in style from +the Proof General Emacs interface, it is faster and its integration +with {\Coq} makes interactive developments more friendly. All +mathematical Unicode symbols are usable within \CoqIDE{}. + +Finally, the module system of {\Coq} completes the picture of {\Coq} +version 8.0. Though released with an experimental status in the previous +version 7.4, it should be considered as a salient feature of the new +version. + +Besides, {\Coq} comes with its load of novelties and improvements: new +or improved tactics (including a new tactic for solving first-order +statements), new management commands, extended libraries. + +\bigskip + +Bruno Barras and Hugo Herbelin have been the main contributors of the +reflexion and the implementation of the new syntax. The smart +automatic translator from old to new syntax released with {\Coq} is also +their work with contributions by Olivier Desmettre. + +Hugo Herbelin is the main designer and implementor of the notion of +interpretation scopes and of the commands for easily adding new notations. + +Hugo Herbelin is the main implementor of the restructuration of the +standard library. + +Pierre Corbineau is the main designer and implementor of the new +tactic for solving first-order statements in presence of inductive +types. He is also the maintainer of the non-domain specific automation +tactics. 
+
+Benjamin Monate is the developer of the \CoqIDE{} graphical
+interface with contributions by Jean-Christophe Filliâtre, Pierre
+Letouzey, Claude Marché and Bruno Barras.
+
+Claude Marché coordinated the edition of the Reference Manual for
+ \Coq{} V8.0.
+
+Pierre Letouzey and Jacek Chrz\k{a}szcz respectively maintained the
+extraction tool and module system of {\Coq}.
+
+Jean-Christophe Filliâtre, Pierre Letouzey, Hugo Herbelin and
+contributors from Sophia-Antipolis and Nijmegen participated in the
+extension of the library.
+
+Julien Narboux built an NSIS-based automatic {\Coq} installation tool for
+the Windows platform.
+
+Hugo Herbelin and Christine Paulin coordinated the development which
+was under the responsibility of Christine Paulin.
+
+\begin{flushright}
+Palaiseau \& Orsay, Apr. 2004\\
+Hugo Herbelin \& Christine Paulin\\
+(updated Apr. 2006)
+\end{flushright}
+
+\section*{Credits: version 8.1}
+
+{\Coq} version 8.1 adds various new functionalities.
+
+Benjamin Grégoire implemented an alternative algorithm to check the
+convertibility of terms in the {\Coq} type-checker. This alternative
+algorithm works by compilation to an efficient bytecode that is
+interpreted in an abstract machine similar to Xavier Leroy's ZINC
+machine. Convertibility is performed by comparing the normal
+forms. This alternative algorithm is specifically interesting for
+proofs by reflection. More generally, it is convenient in case of
+intensive computations.
+
+Christine Paulin implemented an extension of inductive types allowing
+recursively non uniform parameters. Hugo Herbelin implemented
+sort-polymorphism for inductive types.
+
+Claudio Sacerdoti Coen improved the tactics for rewriting on arbitrary
+compatible equivalence relations. He also generalized rewriting to
+arbitrary transition systems.
+
+Claudio Sacerdoti Coen added new features to the module system. 
+ +Benjamin Grégoire, Assia Mahboubi and Bruno Barras developed a new +more efficient and more general simplification algorithm on rings and +semi-rings. + +Hugo Herbelin, Pierre Letouzey and Claudio Sacerdoti Coen added new +tactic features. + +Hugo Herbelin implemented matching on disjunctive patterns. + +New mechanisms made easier the communication between {\Coq} and external +provers. Nicolas Ayache and Jean-Christophe Filliâtre implemented +connections with the provers {\sc cvcl}, {\sc Simplify} and {\sc +zenon}. Hugo Herbelin implemented an experimental protocol for calling +external tools from the tactic language. + +%Matthieu Sozeau developed an experimental language to reason over subtypes. + +A mechanism to automatically use some specific tactic to solve +unresolved implicit has been implemented by Hugo Herbelin. + +Laurent Théry's contribution on strings and Pierre Letouzey and +Jean-Christophe Filliâtre's contribution on finite maps have been +integrated to the {\Coq} standard library. Pierre Letouzey developed a +library about finite sets ``à la Objective Caml'' and extended the +lists library. + +Pierre Corbineau extended his tactic for solving first-order +statements. He wrote a reflexion-based intuitionistic tautology +solver. + +Jean-Marc Notin took care of the general maintenance of the system. + +\begin{flushright} +Palaiseau, Apr. 2006\\ +Hugo Herbelin +\end{flushright} + +%\newpage + +% Integration of ZArith lemmas from Sophia and Nijmegen. 
+ + +% $Id: RefMan-pre.tex 8707 2006-04-13 18:23:35Z herbelin $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-pro.tex b/doc/refman/RefMan-pro.tex new file mode 100644 index 00000000..739ca6b5 --- /dev/null +++ b/doc/refman/RefMan-pro.tex @@ -0,0 +1,389 @@ +\chapter{Proof handling} +\index{Proof editing} +\label{Proof-handling} + +In \Coq's proof editing mode all top-level commands documented in +chapter \ref{Vernacular-commands} remain available +and the user has access to specialized commands dealing with proof +development pragmas documented in this section. He can also use some +other specialized commands called {\em tactics}. They are the very +tools allowing the user to deal with logical reasoning. They are +documented in chapter \ref{Tactics}.\\ +When switching in editing proof mode, the prompt +\index{Prompt} +{\tt Coq <} is changed into {\tt {\ident} <} where {\ident} is the +declared name of the theorem currently edited. + +At each stage of a proof development, one has a list of goals to +prove. Initially, the list consists only in the theorem itself. After +having applied some tactics, the list of goals contains the subgoals +generated by the tactics. + +To each subgoal is associated a number of +hypotheses we call the {\em \index*{local context}} of the goal. +Initially, the local context is empty. It is enriched by the use of +certain tactics (see mainly section~\ref{intro}). + +When a proof is achieved the message {\tt Proof completed} is +displayed. One can then store this proof as a defined constant in the +environment. Because there exists a correspondence between proofs and +terms of $\lambda$-calculus, known as the {\em Curry-Howard +isomorphism} \cite{How80,Bar91,Gir89,Hue89}, \Coq~ stores proofs as +terms of {\sc Cic}. Those terms are called {\em proof + terms}\index{Proof term}. 
+ +It is possible to edit several proofs at the same time: see section +\ref{Resume} + +\ErrMsg When one attempts to use a proof editing command out of the +proof editing mode, \Coq~ raises the error message : \errindex{No focused + proof}. + +\section{Switching on/off the proof editing mode} + +\subsection{\tt Goal {\form}.} +\comindex{Goal}\label{Goal} +This command switches \Coq~ to editing proof mode and sets {\form} as +the original goal. It associates the name {\tt Unnamed\_thm} to +that goal. + +\begin{ErrMsgs} +\item \errindex{the term \form\ has type \ldots{} which should be Set, + Prop or Type} +%\item \errindex{Proof objects can only be abstracted} +%\item \errindex{A goal should be a type} +%\item \errindex{repeated goal not permitted in refining mode} +%the command {\tt Goal} cannot be used while a proof is already being edited. +\end{ErrMsgs} + +\SeeAlso section \ref{Theorem} + +\subsection{\tt Qed.}\comindex{Qed}\label{Qed} +This command is available in interactive editing proof mode when the +proof is completed. Then {\tt Qed} extracts a proof term from the +proof script, switches back to {\Coq} top-level and attaches the +extracted proof term to the declared name of the original goal. This +name is added to the environment as an {\tt Opaque} constant. + +\begin{ErrMsgs} +\item \errindex{Attempt to save an incomplete proof} +%\item \ident\ \errindex{already exists}\\ +% The implicit name is already defined. You have then to provide +% explicitly a new name (see variant 3 below). +\item Sometimes an error occurs when building the proof term, +because tactics do not enforce completely the term construction +constraints. + +The user should also be aware of the fact that since the proof term is +completely rechecked at this point, one may have to wait a while when +the proof is large. In some exceptional cases one may even incur a +memory overflow. 
+\end{ErrMsgs} + +\begin{Variants} + +\item {\tt Defined.} +\comindex{Defined} +\label{Defined} + + Defines the proved term as a transparent constant. + +\item {\tt Save.} +\comindex{Save} + + Is equivalent to {\tt Qed}. + +\item {\tt Save {\ident}.} + + Forces the name of the original goal to be {\ident}. This command + (and the following ones) can only be used if the original goal has + been opened using the {\tt Goal} command. + +\item {\tt Save Theorem {\ident}.} \\ + {\tt Save Lemma {\ident}.} \\ + {\tt Save Remark {\ident}.}\\ + {\tt Save Fact {\ident}.} + + Are equivalent to {\tt Save {\ident}.} +\end{Variants} + +\subsection{\tt Admitted.}\comindex{Admitted}\label{Admitted} +This command is available in interactive editing proof mode to give up +the current proof and declare the initial goal as an axiom. + +\subsection{\tt Theorem {\ident} : {\form}.} +\comindex{Theorem} +\label{Theorem} + +This command switches to interactive editing proof mode and declares +{\ident} as being the name of the original goal {\form}. When declared +as a {\tt Theorem}, the name {\ident} is known at all section levels: +{\tt Theorem} is a {\sl global} lemma. + +%\ErrMsg (see section \ref{Goal}) + +\begin{ErrMsgs} + +\item \errindex{the term \form\ has type \ldots{} which should be Set, + Prop or Type} + +\item \errindexbis{\ident already exists}{already exists} + + The name you provided already defined. You have then to choose + another name. + +\end{ErrMsgs} + + +\begin{Variants} + +\item {\tt Lemma {\ident} : {\form}.} +\comindex{Lemma} + + It is equivalent to {\tt Theorem {\ident} : {\form}.} + +\item {\tt Remark {\ident} : {\form}.}\comindex{Remark}\\ + {\tt Fact {\ident} : {\form}.}\comindex{Fact} + + Used to have a different meaning, but are now equivalent to {\tt + Theorem {\ident} : {\form}.} They are kept for compatibility. 
+ +\item {\tt Definition {\ident} : {\form}.} +\comindex{Definition} + + Analogous to {\tt Theorem}, intended to be used in conjunction with + {\tt Defined} (see \ref{Defined}) in order to define a + transparent constant. + +\item {\tt Let {\ident} : {\form}.} +\comindex{Let} + + Analogous to {\tt Definition} except that the definition is turned + into a local definition on objects depending on it after closing the + current section. +\end{Variants} + +\subsection{\tt Proof {\term}.}\comindex{Proof} +This command applies in proof editing mode. It is equivalent to {\tt + exact {\term}; Save.} That is, you have to give the full proof in +one gulp, as a proof term (see section \ref{exact}). + +\begin{Variants} + +\item{\tt Proof.} + + Is a noop which is useful to delimit the sequence of tactic commands + which start a proof, after a {\tt Theorem} command. It is a good + practice to use {\tt Proof.} as an opening parenthesis, closed in + the script with a closing {\tt Qed.} + +\item{\tt Proof with {\tac}.} + + This command may be used to start a proof. It defines a default + tactic to be used each time a tactic command is ended by + ``\verb#...#''. In this case the tactic command typed by the user is + equivalent to \emph{command};{\tac}. + +\end{Variants} + +\subsection{\tt Abort.} +\comindex{Abort} + +This command cancels the current proof development, switching back to +the previous proof development, or to the \Coq\ toplevel if no other +proof was edited. + +\begin{ErrMsgs} +\item \errindex{No focused proof (No proof-editing in progress)} +\end{ErrMsgs} + +\begin{Variants} + +\item {\tt Abort {\ident}.} + + Aborts the editing of the proof named {\ident}. + +\item {\tt Abort All.} + + Aborts all current goals, switching back to the \Coq\ toplevel. + +\end{Variants} + +\subsection{\tt Suspend.} +\comindex{Suspend} + +This command applies in proof editing mode. It switches back to the +\Coq\ toplevel, but without canceling the current proofs. 
+ +\subsection{\tt Resume.} +\comindex{Resume}\label{Resume} + +This commands switches back to the editing of the last edited proof. + +\begin{ErrMsgs} +\item \errindex{No proof-editing in progress} +\end{ErrMsgs} + +\begin{Variants} + +\item {\tt Resume {\ident}.} + + Restarts the editing of the proof named {\ident}. This can be used + to navigate between currently edited proofs. + +\end{Variants} + +\begin{ErrMsgs} +\item \errindex{No such proof} +\end{ErrMsgs} + +\section{Navigation in the proof tree} + +\subsection{\tt Undo.} +\comindex{Undo} + +This command cancels the effect of the last tactic command. Thus, it +backtracks one step. + +\begin{ErrMsgs} +\item \errindex{No focused proof (No proof-editing in progress)} +\item \errindex{Undo stack would be exhausted} +\end{ErrMsgs} + +\begin{Variants} + +\item {\tt Undo {\num}.} + + Repeats {\tt Undo} {\num} times. + +\end{Variants} + +\subsection{\tt Set Undo {\num}.} +\comindex{Set Undo} + +This command changes the maximum number of {\tt Undo}'s that will be +possible when doing a proof. It only affects proofs started after +this command, such that if you want to change the current undo limit +inside a proof, you should first restart this proof. + +\subsection{\tt Unset Undo.} +\comindex{Unset Undo} + +This command resets the default number of possible {\tt Undo} commands +(which is currently 12). + +\subsection{\tt Restart.}\comindex{Restart} +This command restores the proof editing process to the original goal. + +\begin{ErrMsgs} +\item \errindex{No focused proof to restart} +\end{ErrMsgs} + +\subsection{\tt Focus.}\comindex{Focus} +This focuses the attention on the first subgoal to prove and the printing +of the other subgoals is suspended until the focused subgoal is +solved or unfocused. This is useful when there are many current +subgoals which clutter your screen. + +\begin{Variant} +\item {\tt Focus {\num}.}\\ +This focuses the attention on the $\num^{\scriptsize th}$ subgoal to prove. 
+ +\end{Variant} + +\subsection{\tt Unfocus.}\comindex{Unfocus} +Turns off the focus mode. + + +\section{Displaying information} + +\subsection{\tt Show.}\comindex{Show}\label{Show} +This command displays the current goals. + +\begin{Variants} +\item {\tt Show {\num}.}\\ + Displays only the {\num}-th subgoal.\\ +\begin{ErrMsgs} +\item \errindex{No such goal} +\item \errindex{No focused proof} +\end{ErrMsgs} + +\item {\tt Show Implicits.}\comindex{Show Implicits}\\ + Displays the current goals, printing the implicit arguments of + constants. + +\item {\tt Show Implicits {\num}.}\\ + Same as above, only displaying the {\num}-th subgoal. + +\item {\tt Show Script.}\comindex{Show Script}\\ + Displays the whole list of tactics applied from the beginning + of the current proof. + This tactics script may contain some holes (subgoals not yet proved). + They are printed under the form \verb!!. + +\item {\tt Show Tree.}\comindex{Show Tree}\\ +This command can be seen as a more structured way of +displaying the state of the proof than that +provided by {\tt Show Script}. Instead of just giving +the list of tactics that have been applied, it +shows the derivation tree constructed by then. +Each node of the tree contains the conclusion +of the corresponding sub-derivation (i.e. a +goal with its corresponding local context) and +the tactic that has generated all the +sub-derivations. The leaves of this tree are +the goals which still remain to be proved. + +%\item {\tt Show Node}\comindex{Show Node}\\ +% Not yet documented + +\item {\tt Show Proof.}\comindex{Show Proof}\\ +It displays the proof term generated by the +tactics that have been applied. +If the proof is not completed, this term contain holes, +which correspond to the sub-terms which are still to be +constructed. These holes appear as a question mark indexed +by an integer, and applied to the list of variables in +the context, since it may depend on them. 
+The types obtained by abstracting away the context from the
+type of each hole-placer are also printed.
+
+\item {\tt Show Conjectures.}\comindex{Show Conjectures}\\
+It prints the list of the names of all the theorems that
+are currently being proved.
+As it is possible to start proving a previous lemma during
+the proof of a theorem, this list may contain several
+names.
+
+\item{\tt Show Intro.}\comindex{Show Intro}\\
+If the current goal begins with at least one product, this command
+prints the name of the first product, as it would be generated by
+an anonymous {\tt Intro}. The aim of this command is to ease the
+writing of more robust scripts. For example, with an appropriate
+Proof General macro, it is possible to transform any anonymous {\tt
+  Intro} into a qualified one such as {\tt Intro y13}.
+In the case of a non-product goal, it prints nothing.
+
+\item{\tt Show Intros.}\comindex{Show Intros}\\
+This command is similar to the previous one, it simulates the naming
+process of an {\tt Intros}.
+
+\end{Variants}
+
+\subsection{\tt Set Hyps Limit {\num}.}
+\comindex{Set Hyps Limit}
+This command sets the maximum number of hypotheses displayed in
+goals after the application of a tactic.
+All the hypotheses remain usable in the proof development.
+
+\subsection{\tt Unset Hyps Limit.}
+\comindex{Unset Hyps Limit}
+This command goes back to the default mode which is to print all
+available hypotheses. 
+ +% $Id: RefMan-pro.tex 8609 2006-02-24 13:32:57Z notin,no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty $ + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "Reference-Manual" +%%% End: diff --git a/doc/refman/RefMan-syn.tex b/doc/refman/RefMan-syn.tex new file mode 100644 index 00000000..341e766e --- /dev/null +++ b/doc/refman/RefMan-syn.tex @@ -0,0 +1,1016 @@ +\chapter{Syntax extensions and interpretation scopes} +\label{Addoc-syntax} + +In this chapter, we introduce advanced commands to modify the way +{\Coq} parses and prints objects, i.e. the translations between the +concrete and internal representations of terms and commands. The main +commands are {\tt Notation} and {\tt Infix} which are described in +section \ref{Notation}. It also happens that the same symbolic +notation is expected in different contexts. To achieve this form of +overloading, {\Coq} offers a notion of interpretation scope. This is +described in section \ref{scopes}. + +\Rem The commands {\tt Grammar}, {\tt Syntax} and {\tt Distfix} which +were present for a while in {\Coq} are no longer available from {\Coq} +version 8.0. The underlying AST structure is also no longer available. +The functionalities of the command {\tt Syntactic Definition} are +still available, see section \ref{Abbreviations}. + +\section{Notations} +\label{Notation} +\comindex{Notation} + +\subsection{Basic notations} + +A {\em notation} is a symbolic abbreviation denoting some term +or term pattern. + +A typical notation is the use of the infix symbol \verb=/\= to denote +the logical conjunction (\texttt{and}). Such a notation is declared +by + +\begin{coq_example*} +Notation "A /\ B" := (and A B). +\end{coq_example*} + +The expression \texttt{(and A B)} is the abbreviated term and the +string \verb="A /\ B"= (called a {\em notation}) tells how it is +symbolically written. 
+ +A notation is always surrounded by double quotes (excepted when the +abbreviation is a single ident, see \ref{Abbreviations}). The +notation is composed of {\em tokens} separated by spaces. Identifiers +in the string (such as \texttt{A} and \texttt{B}) are the {\em +parameters} of the notation. They must occur at least once each in the +denoted term. The other elements of the string (such as \verb=/\=) are +the {\em symbols}. + +An identifier can be used as a symbol but it must be surrounded by +simple quotes to avoid the confusion with a parameter. Similarly, +every symbol of at least 3 characters and starting with a simple quote +must be quoted (then it starts by two single quotes). Here is an example. + +\begin{coq_example*} +Notation "'IF' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3). +\end{coq_example*} + +%TODO quote the identifier when not in front, not a keyword, as in "x 'U' y" ? + +A notation binds a syntactic expression to a term. Unless the parser +and pretty-printer of {\Coq} already know how to deal with the +syntactic expression (see \ref{ReservedNotation}), explicit precedences and +associativity rules have to be given. + +\subsection{Precedences and associativity} +\index{Precedences} +\index{Associativity} + +Mixing different symbolic notations in a same text may cause serious +parsing ambiguity. To deal with the ambiguity of notations, {\Coq} +uses precedence levels ranging from 0 to 100 (plus one extra level +numbered 200) and associativity rules. + +Consider for example the new notation + +\begin{coq_example*} +Notation "A \/ B" := (or A B). +\end{coq_example*} + +Clearly, an expression such as {\tt (A:Prop)True \verb=/\= A \verb=\/= +A \verb=\/= False} is ambiguous. To tell the {\Coq} parser how to +interpret the expression, a priority between the symbols \verb=/\= and +\verb=\/= has to be given. Assume for instance that we want conjunction +to bind more than disjunction. 
This is expressed by assigning a +precedence level to each notation, knowing that a lower level binds +more than a higher level. Hence the level for disjunction must be +higher than the level for conjunction. + +Since connectives are the less tight articulation points of a text, it +is reasonable to choose levels not so far from the higher level which +is 100, for example 85 for disjunction and 80 for +conjunction\footnote{which are the levels effectively chosen in the +current implementation of {\Coq}}. + +Similarly, an associativity is needed to decide whether {\tt True \verb=/\= +False \verb=/\= False} defaults to {\tt True \verb=/\= (False +\verb=/\= False)} (right associativity) or to {\tt (True +\verb=/\= False) \verb=/\= False} (left associativity). We may +even consider that the expression is not well-formed and that +parentheses are mandatory (this is a ``no associativity'')\footnote{ +{\Coq} accepts notations declared as no associative but the parser on +which {\Coq} is built, namely {\camlpppp}, currently does not implement the +no-associativity and replace it by a left associativity; hence it is +the same for {\Coq}: no-associativity is in fact left associativity}. +We don't know of a special convention of the associativity of +disjunction and conjunction, let's apply for instance a right +associativity (which is the choice of {\Coq}). + +Precedence levels and associativity rules of notations have to be +given between parentheses in a list of modifiers that the +\texttt{Notation} command understands. Here is how the previous +examples refine. + +\begin{coq_example*} +Notation "A /\ B" := (and A B) (at level 80, right associativity). +Notation "A \/ B" := (or A B) (at level 85, right associativity). +\end{coq_example*} + +By default, a notation is considered non associative, but the +precedence level is mandatory (except for special cases whose level is +canonical). The level is either a number or the mention {\tt next +level} whose meaning is obvious. 
The list of levels already assigned
+is on Figure~\ref{init-notations}.
+
+\subsection{Complex notations}
+
+Notations can be made from arbitrarily complex symbols. One can for
+instance define prefix notations.
+
+\begin{coq_example*}
+Notation "~ x" := (not x) (at level 75, right associativity).
+\end{coq_example*}
+
+One can also define notations for incomplete terms, with the hole
+expected to be inferred at typing time.
+
+\begin{coq_example*}
+Notation "x = y" := (@eq _ x y) (at level 70, no associativity).
+\end{coq_example*}
+
+One can define {\em closed} notations both of whose sides are symbols. In
+this case, the default precedence level for inner subexpressions is 200.
+
+\begin{coq_eval}
+Set Printing Depth 50.
+(********** The following is correct but produces **********)
+(**** an incompatibility with the reserved notation ********)
+\end{coq_eval}
+\begin{coq_example*}
+Notation "( x , y )" := (@pair _ _ x y) (at level 0).
+\end{coq_example*}
+
+One can also define notations for binders.
+
+\begin{coq_eval}
+Set Printing Depth 50.
+(********** The following is correct but produces **********)
+(**** an incompatibility with the reserved notation ********)
+\end{coq_eval}
+\begin{coq_example*}
+Notation "{ x : A | P }" := (sig A (fun x => P)) (at level 0).
+\end{coq_example*}
+
+In the last case though, there is a conflict with the notation for
+type casts. This last notation, as shown by the command {\tt Print Grammar
+constr} is at level 100. To avoid \verb=x : A= being parsed as a type cast,
+it is necessary to put {\tt x} at a level below 100, typically 99. Hence, a
+correct definition is
+
+\begin{coq_example*}
+Notation "{ x : A | P }" := (sig A (fun x => P)) (at level 0, x at level 99).
+\end{coq_example*}
+
+%This change has retrospectively an effect on the notation for notation
+%{\tt "{ A } + { B }"}. 
For the sake of factorization, {\tt A} must be +%put at level 99 too, which gives +% +%\begin{coq_example*} +%Notation "{ A } + { B }" := (sumbool A B) (at level 0, A at level 99). +%\end{coq_example*} + +See the next section for more about factorization. + +\subsection{Simple factorization rules} + +{\Coq} extensible parsing is performed by Camlp4 which is essentially a +LL1 parser. Hence, some care has to be taken not to hide already +existing rules by new rules. Some simple left factorization work has +to be done. Here is an example. + +\begin{coq_eval} +(********** The next rule for notation _ < _ < _ produces **********) +(*** Error: Notation _ < _ < _ is already defined at level 70 ... ***) +\end{coq_eval} +\begin{coq_example*} +Notation "x < y" := (lt x y) (at level 70). +Notation "x < y < z" := (x < y /\ y < z) (at level 70). +\end{coq_example*} + +In order to factorize the left part of the rules, the subexpression +referred by {\tt y} has to be at the same level in both rules. However +the default behavior puts {\tt y} at the next level below 70 +in the first rule (no associativity is the default), and at the level +200 in the second rule (level 200 is the default for inner expressions). +To fix this, we need to force the parsing level of {\tt y}, +as follows. + +\begin{coq_example*} +Notation "x < y" := (lt x y) (at level 70). +Notation "x < y < z" := (x < y /\ y < z) (at level 70, y at next level). +\end{coq_example*} + +For the sake of factorization with {\Coq} predefined rules, simple +rules have to be observed for notations starting with a symbol: +e.g. rules starting with ``\{'' or ``('' should be put at level 0. The +list of {\Coq} predefined notations can be found in chapter \ref{Theories}. + +The command to display the current state of the {\Coq} term parser is +\comindex{Print Grammar constr} + +\begin{quote} +\tt Print Grammar constr. 
+\end{quote} + +\subsection{Displaying symbolic notations} + +The command \texttt{Notation} has an effect both on the {\Coq} parser and +on the {\Coq} printer. For example: + +\begin{coq_example} +Check (and True True). +\end{coq_example} + +However, printing, especially pretty-printing, requires +more care than parsing. We may want specific indentations, +line breaks, alignment if on several lines, etc. + +The default printing of notations is very rudimentary. For printing a +notation, a {\em formatting box} is opened in such a way that if the +notation and its arguments cannot fit on a single line, a line break +is inserted before the symbols of the notation and the arguments on +the next lines are aligned with the argument on the first line. + +A first, simple control that a user can have on the printing of a +notation is the insertion of spaces at some places of the +notation. This is performed by adding extra spaces between the symbols +and parameters: each extra space (other than the single space needed +to separate the components) is interpreted as a space to be inserted +by the printer. Here is an example showing how to add spaces around +the bar of the notation. + +\begin{coq_example} +Notation "{{ x : A | P }}" := (sig (fun x : A => P)) + (at level 0, x at level 99). +Check (sig (fun x : nat => x=x)). +\end{coq_example} + +The second, more powerful control on printing is by using the {\tt +format} modifier. Here is an example + +\begin{small} +\begin{coq_example} +Notation "'If' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3) +(at level 200, right associativity, format +"'[v ' 'If' c1 '/' '[' 'then' c2 ']' '/' '[' 'else' c3 ']' ']'"). 
+\end{coq_example} +\end{small} + +A {\em format} is an extension of the string denoting the notation with +the possible following elements delimited by single quotes: + +\begin{itemize} +\item extra spaces are translated into simple spaces +\item tokens of the form \verb='/ '= are translated into breaking point, + in case a line break occurs, an indentation of the number of spaces + after the ``\verb=/='' is applied (2 spaces in the given example) +\item token of the form \verb='//'= force writing on a new line +\item well-bracketed pairs of tokens of the form \verb='[ '= and \verb=']'= + are translated into printing boxes; in case a line break occurs, + an extra indentation of the number of spaces given after the ``\verb=[='' + is applied (4 spaces in the example) +\item well-bracketed pairs of tokens of the form \verb='[hv '= and \verb=']'= + are translated into horizontal-orelse-vertical printing boxes; + if the content of the box does not fit on a single line, then every breaking + point forces a newline and an extra indentation of the number of spaces + given after the ``\verb=[='' is applied at the beginning of each newline + (3 spaces in the example) +\item well-bracketed pairs of tokens of the form \verb='[v '= and + \verb=']'= are translated into vertical printing boxes; every + breaking point forces a newline, even if the line is large enough to + display the whole content of the box, and an extra indentation of the + number of spaces given after the ``\verb=[='' is applied at the beginning + of each newline +\end{itemize} + +Thus, for the previous example, we get +%\footnote{The ``@'' is here to shunt +%the notation "'IF' A 'then' B 'else' C" which is defined in {\Coq} +%initial state}: + +Notations do not survive the end of sections. No typing of the denoted +expression is performed at definition time. Type-checking is done only +at the time of use of the notation. 
+ +\begin{coq_example} +Check + (IF_then_else (IF_then_else True False True) + (IF_then_else True False True) + (IF_then_else True False True)). +\end{coq_example} + +\Rem +Sometimes, a notation is expected only for the parser. +%(e.g. because +%the underlying parser of {\Coq}, namely {\camlpppp}, is LL1 and some extra +%rules are needed to circumvent the absence of factorization). +To do so, the option {\em only parsing} is allowed in the list of modifiers of +\texttt{Notation}. + +\subsection{The \texttt{Infix} command +\comindex{Infix}} + +The \texttt{Infix} command is a shortening for declaring notations of +infix symbols. Its syntax is + +\begin{quote} +\noindent\texttt{Infix "{\symbolentry}" :=} {\qualid} {\tt (} \nelist{\em modifier}{,} {\tt )}. +\end{quote} + +and it is equivalent to + +\begin{quote} +\noindent\texttt{Notation "x {\symbolentry} y" := ({\qualid} x y) (} \nelist{\em modifier}{,} {\tt )}. +\end{quote} + +where {\tt x} and {\tt y} are fresh names distinct from {\qualid}. Here is an example. + +\begin{coq_example*} +Infix "/\" := and (at level 80, right associativity). +\end{coq_example*} + +\subsection{Reserving notations +\label{ReservedNotation} +\comindex{ReservedNotation}} + +A given notation may be used in different contexts. {\Coq} expects all +uses of the notation to be defined at the same precedence and with the +same associativity. To avoid giving the precedence and associativity +every time, it is possible to declare a parsing rule in advance +without giving its interpretation. Here is an example from the initial +state of {\Coq}. + +\begin{coq_example} +Reserved Notation "x = y" (at level 70, no associativity). +\end{coq_example} + +Reserving a notation is also useful for simultaneously defined an +inductive type or a recursive constant and a notation for it. + +\Rem The notations mentioned on Figure~\ref{init-notations} are +reserved. Hence their precedence and associativity cannot be changed. 
+ +\subsection{Simultaneous definition of terms and notations +\comindex{Fixpoint {\ldots} where {\ldots}} +\comindex{CoFixpoint {\ldots} where {\ldots}} +\comindex{Inductive {\ldots} where {\ldots}}} + +Thanks to reserved notations, the inductive, coinductive, recursive +and corecursive definitions can benefit of customized notations. To do +this, insert a {\tt where} notation clause after the definition of the +(co)inductive type or (co)recursive term (or after the definition of +each of them in case of mutual definitions). The exact syntax is given +on Figure \ref{notation-syntax}. Here are examples: + +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is correct but produces an error **********) +(********** because the symbol /\ is already bound **********) +(**** Error: The conclusion of A -> B -> A /\ B is not valid *****) +\end{coq_eval} + +\begin{coq_example*} +Inductive and (A B:Prop) : Prop := conj : A -> B -> A /\ B +where "A /\ B" := (and A B). +\end{coq_example*} + +\begin{coq_eval} +Set Printing Depth 50. +(********** The following is correct but produces an error **********) +(********** because the symbol + is already bound **********) +(**** Error: no recursive definition *****) +\end{coq_eval} + +\begin{coq_example*} +Fixpoint plus (n m:nat) {struct n} : nat := + match n with + | O => m + | S p => S (p+m) + end +where "n + m" := (plus n m). +\end{coq_example*} + +\subsection{Displaying informations about notations +\comindex{Set Printing Notations} +\comindex{Unset Printing Notations}} + +To deactivate the printing of all notations, use the command +\begin{quote} +\tt Unset Printing Notations. +\end{quote} +To reactivate it, use the command +\begin{quote} +\tt Set Printing Notations. +\end{quote} +The default is to use notations for printing terms wherever possible. + +\SeeAlso {\tt Set Printing All} in section \ref{SetPrintingAll}. 
+
+\subsection{Locating notations
+\comindex{Locate}
+\label{LocateSymbol}}
+
+To know to which notations a given symbol belongs, use the command
+\begin{quote}
+\tt Locate {\symbolentry}
+\end{quote}
+where symbol is any (composite) symbol surrounded by quotes. To locate
+a particular notation, use a string where the variables of the
+notation are replaced by ``\_''.
+
+\Example
+\begin{coq_example}
+Locate "exists".
+Locate "'exists' _ , _".
+\end{coq_example}
+
+\SeeAlso Section \ref{Locate}.
+
+\begin{figure}
+\begin{centerframe}
+\begin{tabular}{lcl}
+{\sentence} & ::= &
+ \texttt{Notation} \zeroone{\tt Local} {\str} \texttt{:=} {\term}
+ \zeroone{\modifiers} \zeroone{:{\scope}} .\\
+ & $|$ &
+ \texttt{Infix} \zeroone{\tt Local} {\str} \texttt{:=} {\qualid}
+ \zeroone{\modifiers} \zeroone{:{\scope}} .\\
+ & $|$ &
+ \texttt{Reserved Notation} \zeroone{\tt Local} {\str}
+ \zeroone{\modifiers} .\\
+ & $|$ & {\tt Inductive}
+ \nelist{{\inductivebody} \zeroone{\declnotation}}{with}{\tt .}\\
+ & $|$ & {\tt CoInductive}
+ \nelist{{\inductivebody} \zeroone{\declnotation}}{with}{\tt .}\\
+ & $|$ & {\tt Fixpoint}
+ \nelist{{\fixpointbody} \zeroone{\declnotation}}{with} {\tt .} \\
+ & $|$ & {\tt CoFixpoint}
+ \nelist{{\cofixpointbody} \zeroone{\declnotation}}{with} {\tt .} \\
+\\
+{\declnotation} & ::= &
+ \zeroone{{\tt where} {\str} {\tt :=} {\term} \zeroone{:{\scope}}} .
+\\ +\\ +{\modifiers} + & ::= & \nelist{\ident}{,} {\tt at level} {\naturalnumber} \\ + & $|$ & \nelist{\ident}{,} {\tt at next level} \\ + & $|$ & {\tt at level} {\naturalnumber} \\ + & $|$ & {\tt left associativity} \\ + & $|$ & {\tt right associativity} \\ + & $|$ & {\tt no associativity} \\ + & $|$ & {\ident} {\tt ident} \\ + & $|$ & {\ident} {\tt global} \\ + & $|$ & {\ident} {\tt bigint} \\ + & $|$ & {\tt only parsing} \\ + & $|$ & {\tt format} {\str} +\end{tabular} +\end{centerframe} +\caption{Syntax of the variants of {\tt Notation}} +\label{notation-syntax} +\end{figure} + +\subsection{Notations with recursive patterns} + +An experimental mechanism is provided for declaring elementary +notations including recursive patterns. The basic syntax is + +\begin{coq_eval} +Require Import List. +\end{coq_eval} + +\begin{coq_example*} +Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) ..). +\end{coq_example*} + +On the right-hand-side, an extra construction of the form {\tt ..} ($f$ +$t_1$ $\ldots$ $t_n$) {\tt ..} can be used. Notice that {\tt ..} is part of +the {\Coq} syntax while $\ldots$ is just a meta-notation of this +manual to denote a sequence of terms of arbitrary size. + +This extra construction enclosed within {\tt ..}, let's call it $t$, +must be one of the argument of an applicative term of the form {\tt +($f$ $u_1$ $\ldots$ $u_n$)}. The sequences $t_1$ $\ldots$ $t_n$ and +$u_1$ $\ldots$ $u_n$ must coincide everywhere but in two places. In +one place, say the terms of indice $i$, we must have $u_i = t$. In the +other place, say the terms of indice $j$, both $u_j$ and $t_j$ must be +variables, say $x$ and $y$ which are bound by the notation string on +the left-hand-side of the declaration. The variables $x$ and $y$ in +the string must occur in a substring of the form "$x$ $s$ {\tt ..} $s$ +$y$" where {\tt ..} is part of the syntax and $s$ is two times the +same sequence of terminal symbols (i.e. symbols which are not +variables). 
+
+These invariants must be satisfied in order for the notation to be
+correct. The term $t_i$ is the {\em terminating} expression of
+the notation and the pattern {\tt ($f$ $u_1$ $\ldots$ $u_{i-1}$ {\rm [I]}
+$u_{i+1}$ $\ldots$ $u_{j-1}$ {\rm [E]} $u_{j+1}$ $\ldots$ $u_{n}$)} is the
+{\em iterating pattern}. The hole [I] is the {\em iterative} place
+and the hole [E] is the {\em enumerating} place. Remark that if $j
+ + + + + + + + +
Cover page
Table of contents
+Bibliography
+Global Index +
+Tactics Index +
+Vernacular Commands Index +
+Index of Error Messages +
+ + + + \ No newline at end of file diff --git a/doc/rt/RefMan-cover.tex b/doc/rt/RefMan-cover.tex new file mode 100644 index 00000000..d881329a --- /dev/null +++ b/doc/rt/RefMan-cover.tex @@ -0,0 +1,46 @@ +\documentstyle[RRcover]{book} + % L'utilisation du style `french' force le résumé français à + % apparaître en premier. + +\RRtitle{Manuel de r\'ef\'erence du syst\`eme Coq \\ version V7.1} +\RRetitle{The Coq Proof Assistant \\ Reference Manual \\ Version 7.1 +\thanks +{This research was partly supported by ESPRIT Basic Research +Action ``Types'' and by the GDR ``Programmation'' co-financed by MRE-PRC and CNRS.} +} +\RRauthor{Bruno Barras, Samuel Boutin, Cristina Cornes, +Judica\"el Courant, Jean-Christophe Filli\^atre, Eduardo Gim\'enez, +Hugo Herbelin, G\'erard Huet, C\'esar Mu\~noz, Chetan Murthy, +Catherine Parent, Christine Paulin-Mohring, +Amokrane Sa{\"\i}bi, Benjamin Werner} +\authorhead{} +\titlehead{Coq V7.1 Reference Manual} +\RRtheme{2} +\RRprojet{Coq} +\RRNo{0123456789} +\RRdate{May 1997} +%\RRpages{} +\URRocq + +\RRresume{Coq est un syst\`eme permettant le d\'eveloppement et la +v\'erification de preuves formelles dans une logique d'ordre +sup\'erieure incluant un riche langage de d\'efinitions de fonctions. +Ce document constitue le manuel de r\'ef\'erence de la version V7.1 +qui est distribu\'ee par ftp anonyme à l'adresse +\url{ftp://ftp.inria.fr/INRIA/coq/}} + +\RRmotcle{Coq, Syst\`eme d'aide \`a la preuve, Preuves formelles, +Calcul des Constructions Inductives} + + +\RRabstract{Coq is a proof assistant based on a higher-order logic +allowing powerful definitions of functions. 
+Coq V7.1 is available by anonymous +ftp at \url{ftp://ftp.inria.fr/INRIA/coq/}} + +\RRkeyword{Coq, Proof Assistant, Formal Proofs, Calculus of Inductives +Constructions} + +\begin{document} +\makeRT +\end{document} diff --git a/doc/rt/Tutorial-cover.tex b/doc/rt/Tutorial-cover.tex new file mode 100644 index 00000000..b747b812 --- /dev/null +++ b/doc/rt/Tutorial-cover.tex @@ -0,0 +1,48 @@ +\documentstyle[RRcover]{book} + % L'utilisation du style `french' force le résumé français à + % apparaître en premier. +\RRetitle{ +The Coq Proof Assistant \\ A Tutorial \\ Version 7.1 +\thanks{This research was partly supported by ESPRIT Basic Research +Action ``Types'' and by the GDR ``Programmation'' co-financed by MRE-PRC and CNRS.} +} +\RRtitle{Coq \\ Une introduction \\ V7.1 } +\RRauthor{G\'erard Huet, Gilles Kahn and Christine Paulin-Mohring} +\RRtheme{2} +\RRprojet{{Coq +\\[15pt] +{INRIA Rocquencourt} +{\hskip -5.25pt} +~~{\bf ---}~~ + \def\thefootnote{\arabic{footnote}\hss} +{CNRS - ENS Lyon} +\footnote[1]{LIP URA 1398 du CNRS, +46 All\'ee d'Italie, 69364 Lyon CEDEX 07, France.} +{\hskip -14pt}}} + +%\RRNo{0123456789} +\RRNo{0204} +\RRdate{Ao\^ut 1997} + +\URRocq +\RRresume{Coq est un syst\`eme permettant le d\'eveloppement et la +v\'erification de preuves formelles dans une logique d'ordre +sup\'erieure incluant un riche langage de d\'efinitions de fonctions. +Ce document constitue une introduction pratique \`a l'utilisation de +la version V7.1 qui est distribu\'ee par ftp anonyme à l'adresse +\url{ftp://ftp.inria.fr/INRIA/coq/}} + +\RRmotcle{Coq, Syst\`eme d'aide \`a la preuve, Preuves formelles, Calcul +des Constructions Inductives} + +\RRabstract{Coq is a proof assistant based on a higher-order logic +allowing powerful definitions of functions. This document is a +tutorial for the version V7.1 of Coq. 
This version is available by +anonymous ftp at \url{ftp://ftp.inria.fr/INRIA/coq/}} + +\RRkeyword{Coq, Proof Assistant, Formal Proofs, Calculus of Inductives +Constructions} + +\begin{document} +\makeRT +\end{document} diff --git a/doc/stdlib/Library.tex b/doc/stdlib/Library.tex new file mode 100755 index 00000000..97748af6 --- /dev/null +++ b/doc/stdlib/Library.tex @@ -0,0 +1,62 @@ +\documentclass[11pt]{article} + +\usepackage[latin1]{inputenc} +\usepackage[T1]{fontenc} +\usepackage{fullpage} +\usepackage{coqdoc} + +\input{../common/version} +\input{../common/title} +\input{../common/macros} + +\begin{document} + +\coverpage{The standard library}% +{\ } +{This material is distributed under the terms of the GNU Lesser +General Public License Version 2.1.} + +\tableofcontents + +\newpage +\section*{The \Coq\ standard library} + +This document is a short description of the \Coq\ standard library. +This library comes with the system as a complement of the core library +(the {\bf Init} library ; see the Reference Manual for a description +of this library). It provides a set of modules directly available +through the \verb!Require! command. + +The standard library is composed of the following subdirectories: +\begin{description} + \item[Logic] Classical logic and dependent equality + \item[Bool] Booleans (basic functions and results) + \item[Arith] Basic Peano arithmetic + \item[ZArith] Basic integer arithmetic + \item[Reals] Classical Real Numbers and Analysis + \item[Lists] Monomorphic and polymorphic lists (basic functions and + results), Streams (infinite sequences defined + with co-inductive types) + \item[Sets] Sets (classical, constructive, finite, infinite, power set, + etc.) + \item[Relations] Relations (definitions and basic results). + \item[Sorting] Sorted list (basic definitions and heapsort + correctness). + \item[Wellfounded] Well-founded relations (basic results). 
+ \item[IntMap] Representation of finite sets by an efficient + structure of map (trees indexed by binary integers). +\end{description} + + +Each of these subdirectories contains a set of modules, whose +specifications (\gallina{} files) have +been roughly, and automatically, pasted in the following pages. There +is also a version of this document in HTML format on the WWW, which +you can access from the \Coq\ home page at +\texttt{http://coq.inria.fr/library}. + +\input{Library.coqdoc} + +\end{document} + +% $Id: Library.tex 8626 2006-03-14 15:01:00Z notin $ diff --git a/doc/stdlib/index-list.html.template b/doc/stdlib/index-list.html.template new file mode 100644 index 00000000..cbb8580d --- /dev/null +++ b/doc/stdlib/index-list.html.template @@ -0,0 +1,339 @@ + + + + +The Coq Standard Library +</head> + +<body> + +<H1>The Coq Standard Library</H1> + +<p>Here is a short description of the Coq standard library, which is +distributed with the system. +It provides a set of modules directly available +through the <tt>Require Import</tt> command.</p> + +<p>The standard library is composed of the following subdirectories:</p> + +<dl> + <dt> <b>Init</b>: + The core library (automatically loaded when starting Coq) + </dt> + <dd> + theories/Init/Notations.v + theories/Init/Datatypes.v + theories/Init/Logic.v + theories/Init/Logic_Type.v + theories/Init/Peano.v + theories/Init/Specif.v + theories/Init/Tactics.v + theories/Init/Wf.v + (theories/Init/Prelude.v) + </dd> + + <dt> <b>Logic</b>: + Classical logic and dependent equality + </dt> + <dd> + theories/Logic/Classical_Pred_Set.v + theories/Logic/Classical_Pred_Type.v + theories/Logic/Classical_Prop.v + theories/Logic/Classical_Type.v + (theories/Logic/Classical.v) + theories/Logic/Decidable.v + theories/Logic/Eqdep_dec.v + theories/Logic/EqdepFacts.v + theories/Logic/Eqdep.v + theories/Logic/JMeq.v + theories/Logic/RelationalChoice.v + theories/Logic/ClassicalChoice.v + theories/Logic/ChoiceFacts.v + 
theories/Logic/ClassicalDescription.v + theories/Logic/ClassicalFacts.v + theories/Logic/Berardi.v + theories/Logic/Diaconescu.v + theories/Logic/Hurkens.v + theories/Logic/ProofIrrelevance.v + theories/Logic/ProofIrrelevanceFacts.v + </dd> + + <dt> <b>Arith</b>: + Basic Peano arithmetic + </dt> + <dd> + theories/Arith/Le.v + theories/Arith/Lt.v + theories/Arith/Plus.v + theories/Arith/Minus.v + theories/Arith/Mult.v + theories/Arith/Gt.v + theories/Arith/Between.v + theories/Arith/Peano_dec.v + theories/Arith/Compare_dec.v + (theories/Arith/Arith.v) + theories/Arith/Min.v + theories/Arith/Max.v + theories/Arith/Compare.v + theories/Arith/Div2.v + theories/Arith/Div.v + theories/Arith/EqNat.v + theories/Arith/Euclid.v + theories/Arith/Even.v + theories/Arith/Bool_nat.v + theories/Arith/Factorial.v + theories/Arith/Wf_nat.v + </dd> + + <dt> <b>NArith</b>: + Binary positive integers + </dt> + <dd> + theories/NArith/BinPos.v + theories/NArith/BinNat.v + (theories/NArith/NArith.v) + theories/NArith/Pnat.v + </dd> + + <dt> <b>ZArith</b>: + Binary integers + </dt> + <dd> + theories/ZArith/BinInt.v + theories/ZArith/Zorder.v + theories/ZArith/Zcompare.v + theories/ZArith/Znat.v + theories/ZArith/Zmin.v + theories/ZArith/Zmax.v + theories/ZArith/Zminmax.v + theories/ZArith/Zabs.v + theories/ZArith/Zeven.v + theories/ZArith/auxiliary.v + theories/ZArith/ZArith_dec.v + theories/ZArith/Zbool.v + theories/ZArith/Zmisc.v + theories/ZArith/Wf_Z.v + theories/ZArith/Zhints.v + (theories/ZArith/ZArith_base.v) + theories/ZArith/Zcomplements.v + theories/ZArith/Zsqrt.v + theories/ZArith/Zpower.v + theories/ZArith/Zdiv.v + theories/ZArith/Zlogarithm.v + (theories/ZArith/ZArith.v) + theories/ZArith/Zwf.v + theories/ZArith/Zbinary.v + theories/ZArith/Znumtheory.v + </dd> + + <dt> <b>Reals</b>: + Formalization of real numbers + </dt> + <dd> + theories/Reals/Rdefinitions.v + theories/Reals/Raxioms.v + theories/Reals/RIneq.v + theories/Reals/DiscrR.v + (theories/Reals/Rbase.v) + 
theories/Reals/RList.v + theories/Reals/Ranalysis.v + theories/Reals/Rbasic_fun.v + theories/Reals/Rderiv.v + theories/Reals/Rfunctions.v + theories/Reals/Rgeom.v + theories/Reals/R_Ifp.v + theories/Reals/Rlimit.v + theories/Reals/Rseries.v + theories/Reals/Rsigma.v + theories/Reals/R_sqr.v + theories/Reals/Rtrigo_fun.v + theories/Reals/Rtrigo.v + theories/Reals/SplitAbsolu.v + theories/Reals/SplitRmult.v + theories/Reals/Alembert.v + theories/Reals/AltSeries.v + theories/Reals/ArithProp.v + theories/Reals/Binomial.v + theories/Reals/Cauchy_prod.v + theories/Reals/Cos_plus.v + theories/Reals/Cos_rel.v + theories/Reals/Exp_prop.v + theories/Reals/Integration.v + theories/Reals/MVT.v + theories/Reals/NewtonInt.v + theories/Reals/PSeries_reg.v + theories/Reals/PartSum.v + theories/Reals/R_sqrt.v + theories/Reals/Ranalysis1.v + theories/Reals/Ranalysis2.v + theories/Reals/Ranalysis3.v + theories/Reals/Ranalysis4.v + theories/Reals/Rcomplete.v + theories/Reals/RiemannInt.v + theories/Reals/RiemannInt_SF.v + theories/Reals/Rpower.v + theories/Reals/Rprod.v + theories/Reals/Rsqrt_def.v + theories/Reals/Rtopology.v + theories/Reals/Rtrigo_alt.v + theories/Reals/Rtrigo_calc.v + theories/Reals/Rtrigo_def.v + theories/Reals/Rtrigo_reg.v + theories/Reals/SeqProp.v + theories/Reals/SeqSeries.v + theories/Reals/Sqrt_reg.v + (theories/Reals/Reals.v) + </dd> + + <dt> <b>Bool</b>: + Booleans (basic functions and results) + </dt> + <dd> + theories/Bool/Bool.v + theories/Bool/BoolEq.v + theories/Bool/DecBool.v + theories/Bool/IfProp.v + theories/Bool/Sumbool.v + theories/Bool/Zerob.v + theories/Bool/Bvector.v + </dd> + + <dt> <b>Lists</b>: + Polymorphic lists, Streams (infinite sequences) + </dt> + <dd> + theories/Lists/List.v + theories/Lists/ListSet.v + theories/Lists/MonoList.v + theories/Lists/MoreList.v + theories/Lists/SetoidList.v + theories/Lists/Streams.v + theories/Lists/TheoryList.v + </dd> + + <dt> <b>Sets</b>: + Sets (classical, constructive, finite, infinite, powerset, 
etc.) + </dt> + <dd> + theories/Sets/Classical_sets.v + theories/Sets/Constructive_sets.v + theories/Sets/Cpo.v + theories/Sets/Ensembles.v + theories/Sets/Finite_sets_facts.v + theories/Sets/Finite_sets.v + theories/Sets/Image.v + theories/Sets/Infinite_sets.v + theories/Sets/Integers.v + theories/Sets/Multiset.v + theories/Sets/Partial_Order.v + theories/Sets/Permut.v + theories/Sets/Powerset_Classical_facts.v + theories/Sets/Powerset_facts.v + theories/Sets/Powerset.v + theories/Sets/Relations_1_facts.v + theories/Sets/Relations_1.v + theories/Sets/Relations_2_facts.v + theories/Sets/Relations_2.v + theories/Sets/Relations_3_facts.v + theories/Sets/Relations_3.v + theories/Sets/Uniset.v + </dd> + + <dt> <b>Relations</b>: + Relations (definitions and basic results) + </dt> + <dd> + theories/Relations/Relation_Definitions.v + theories/Relations/Relation_Operators.v + theories/Relations/Relations.v + theories/Relations/Operators_Properties.v + theories/Relations/Rstar.v + theories/Relations/Newman.v + </dd> + + <dt> <b>Wellfounded</b>: + Well-founded Relations + </dt> + <dd> + theories/Wellfounded/Disjoint_Union.v + theories/Wellfounded/Inclusion.v + theories/Wellfounded/Inverse_Image.v + theories/Wellfounded/Lexicographic_Exponentiation.v + theories/Wellfounded/Lexicographic_Product.v + theories/Wellfounded/Transitive_Closure.v + theories/Wellfounded/Union.v + theories/Wellfounded/Wellfounded.v + theories/Wellfounded/Well_Ordering.v + </dd> + + <dt> <b>Sorting</b>: + Axiomatizations of sorts + </dt> + <dd> + theories/Sorting/Heap.v + theories/Sorting/Permutation.v + theories/Sorting/Sorting.v + </dd> + + <dt> <b>Setoids</b>: + <dd> + theories/Setoids/Setoid.v + </dd> + + <dt> <b>IntMap</b>: + Finite sets/maps as trees indexed by addresses + </dt> + <dd> + theories/IntMap/Addr.v + theories/IntMap/Adist.v + theories/IntMap/Addec.v + theories/IntMap/Adalloc.v + theories/IntMap/Map.v + theories/IntMap/Fset.v + theories/IntMap/Mapaxioms.v + theories/IntMap/Mapiter.v + 
theories/IntMap/Mapcanon.v + theories/IntMap/Mapsubset.v + theories/IntMap/Lsort.v + theories/IntMap/Mapfold.v + theories/IntMap/Mapcard.v + theories/IntMap/Mapc.v + theories/IntMap/Maplists.v + theories/IntMap/Allmaps.v + </dd> + + <dt> <b>FSets</b>: + Modular implementation of finite sets/maps using lists + </dt> + <dd> + theories/FSets/DecidableType.v + theories/FSets/OrderedType.v + theories/FSets/FSetInterface.v + theories/FSets/FSetBridge.v + theories/FSets/FSetProperties.v + theories/FSets/FSetEqProperties.v + theories/FSets/FSetFacts.v + theories/FSets/FSetList.v + theories/FSets/FSet.v + theories/FSets/FMapInterface.v + theories/FSets/FMapList.v + theories/FSets/FMap.v + theories/FSets/FSetWeakInterface.v + theories/FSets/FSetWeakFacts.v + theories/FSets/FSetWeakList.v + theories/FSets/FSetWeak.v + theories/FSets/FMapWeakInterface.v + theories/FSets/FMapWeakList.v + theories/FSets/FMapWeak.v + </dd> + + <dt> <b>Strings</b> + Implementation of string as list of ascii characters + </dt> + <dd> + theories/Strings/Ascii.v + theories/Strings/String.v + </dd> + + </dd> +</dl> diff --git a/doc/stdlib/index-trailer.html b/doc/stdlib/index-trailer.html new file mode 100644 index 00000000..308b1d01 --- /dev/null +++ b/doc/stdlib/index-trailer.html @@ -0,0 +1,2 @@ +</body> +</html> diff --git a/doc/stdlib/make-library-files b/doc/stdlib/make-library-files new file mode 100755 index 00000000..91e3cc3f --- /dev/null +++ b/doc/stdlib/make-library-files @@ -0,0 +1,36 @@ +#!/bin/sh + +# Needs COQTOP and GALLINA set + +# On garde la liste de tous les *.v avec dates dans library.files.ls +# Si elle a change depuis la derniere fois ou library.files n'existe pas +# on fabrique des .g (si besoin) et la liste library.files dans +# l'ordre de ls -tr des *.vo +# Ce dernier trie les fichiers dans l'ordre inverse de leur date de création +# En supposant que make fait son boulot, ca fait un tri topologique du +# graphe des dépendances + +LIBDIRS="Arith NArith ZArith Reals Logic Bool 
Lists IntMap Relations Sets Sorting Wellfounded Setoids" + +rm -f library.files.ls.tmp +(cd $COQTOP/theories; find $LIBDIR -name "*.v" -ls) > library.files.ls.tmp +if ! test -e library.files || ! cmp library.files.ls library.files.ls.tmp; then + mv -f library.files.ls.tmp library.files.ls + rm -f library.files; touch library.files + ABSOLUTE=`pwd`/library.files + cd $COQTOP/theories + echo $LIBDIRS + for rep in $LIBDIRS ; do + (cd $rep + echo $rep/intro.tex >> $ABSOLUTE + VOFILES=`ls -tr *.vo` + for file in $VOFILES ; do + VF=`basename $file \.vo` + if [ \( ! -e $VF.g \) -o \( $VF.v -nt $VF.g \) ] ; then + $GALLINA $VF.v + fi + echo $rep/$VF.g >> $ABSOLUTE + done + ) + done +fi diff --git a/doc/stdlib/make-library-index b/doc/stdlib/make-library-index new file mode 100755 index 00000000..ddbcd09f --- /dev/null +++ b/doc/stdlib/make-library-index @@ -0,0 +1,35 @@ +#!/bin/sh + +# Instantiate links to library files in index template + +FILE=$1 + +cp -f $FILE.template tmp +echo -n Building file index-list.prehtml ... + +for i in ../theories/*; do + echo $i + + d=`basename $i` + if [ "$d" != "Num" -a "$d" != "CVS" ]; then + for j in $i/*.v; do + b=`basename $j .v` + rm -f tmp2 + grep -q theories/$d/$b.v tmp + a=$? + if [ $a = 0 ]; then + sed -e "s:theories/$d/$b.v:<a href=\"Coq.$d.$b.html\">$b</a>:g" tmp > tmp2 + mv -f tmp2 tmp + else + echo Warning: theories/$d/$b.v is missing in the template file + fi + done + fi + rm -f tmp2 + sed -e "s/#$d#//" tmp > tmp2 + mv -f tmp2 tmp +done +a=`grep theories tmp` +if [ $? 
= 0 ]; then echo Warning: extra files:; echo $a; fi +mv tmp $FILE +echo Done diff --git a/doc/tools/Translator.tex b/doc/tools/Translator.tex new file mode 100644 index 00000000..005ca9c0 --- /dev/null +++ b/doc/tools/Translator.tex @@ -0,0 +1,898 @@ +\ifx\pdfoutput\undefined % si on est pas en pdflatex +\documentclass[11pt,a4paper]{article} +\else +\documentclass[11pt,a4paper,pdftex]{article} +\fi +\usepackage[latin1]{inputenc} +\usepackage[T1]{fontenc} +\usepackage{pslatex} +\usepackage{url} +\usepackage{verbatim} +\usepackage{amsmath} +\usepackage{amssymb} +\usepackage{array} +\usepackage{fullpage} + +\title{Translation from Coq V7 to V8} +\author{The Coq Development Team} + +%% Macros etc. +\catcode`\_=13 +\let\subscr=_ +\def_{\ifmmode\sb\else\subscr\fi} + +\def\NT#1{\langle\textit{#1}\rangle} +\def\NTL#1#2{\langle\textit{#1}\rangle_{#2}} +%\def\TERM#1{\textsf{\bf #1}} +\def\TERM#1{\texttt{#1}} +\newenvironment{transbox} + {\begin{center}\tt\begin{tabular}{l|ll} \hfil\textrm{V7} & \hfil\textrm{V8} \\ \hline} + {\end{tabular}\end{center}} +\def\TRANS#1#2 + {\begin{tabular}[t]{@{}l@{}}#1\end{tabular} & + \begin{tabular}[t]{@{}l@{}}#2\end{tabular} \\} +\def\TRANSCOM#1#2#3 + {\begin{tabular}[t]{@{}l@{}}#1\end{tabular} & + \begin{tabular}[t]{@{}l@{}}#2\end{tabular} & #3 \\} + +%% +%% +%% +\begin{document} +\maketitle + +\section{Introduction} + +Coq version 8.0 is a major version and carries major changes: the +concrete syntax was redesigned almost from scratch, and many notions +of the libraries were renamed for uniformisation purposes. We felt +that these changes could discourage users with large theories from +switching to the new version. + +The goal of this document is to introduce these changes on simple +examples (mainly the syntactic changes), and describe the automated +tools to help moving to V8.0. 
Essentially, it consists of a translator +that takes as input a Coq source file in old syntax and produces a +file in new syntax and adapted to the new standard library. The main +extra features of this translator is that it keeps comments, even +those within expressions\footnote{The position of those comment might +differ slightly since there is no exact matching of positions between +old and new syntax.}. + +The document is organised as follows: first section describes the new +syntax on simple examples. It is very translation-oriented. This +should give users of older versions the flavour of the new syntax, and +allow them to make translation manually on small +examples. Section~\ref{Translation} explains how the translation +process can be automatised for the most part (the boring one: applying +similar changes over thousands of lines of code). We strongly advise +users to follow these indications, in order to avoid many potential +complications of the translation process. + + +\section{The new syntax on examples} + +The goal of this section is to introduce to the new syntax of Coq on +simple examples, rather than just giving the new grammar. It is +strongly recommended to read first the definition of the new syntax +(in the reference manual), but this document should also be useful for +the eager user who wants to start with the new syntax quickly. + +The toplevel has an option {\tt -translate} which allows to +interactively translate commands. This toplevel translator accepts a +command, prints the translation on standard output (after a % +\verb+New syntax:+ balise), executes the command, and waits for another +command. The only requirements is that they should be syntactically +correct, but they do not have to be well-typed. + +This interactive translator proved to be useful in two main +usages. First as a ``debugger'' of the translation. Before the +translation, it may help in spotting possible conflicts between the +new syntax and user notations. 
Or when the translation fails for some +reason, it makes it easy to find the exact reason why it failed and +make attempts in fixing the problem. + +The second usage of the translator is when trying to make the first +proofs in new syntax. Well trained users will automatically think +their scripts in old syntax and might waste much time (and the +intuition of the proof) if they have to search the translation in a +document. Running a translator in the background will allow the user +to instantly have the answer. + +The rest of this section is a description of all the aspects of the +syntax that changed and how they were translated. All the examples +below can be tested by entering the V7 commands in the toplevel +translator. + + +%% + +\subsection{Changes in lexical conventions w.r.t. V7} + +\subsubsection{Identifiers} + +The lexical conventions changed: \TERM{_} is not a regular identifier +anymore. It is used in terms as a placeholder for subterms to be inferred +at type-checking, and in patterns as a non-binding variable. + +Furthermore, only letters (Unicode letters), digits, single quotes and +_ are allowed after the first character. + +\subsubsection{Quoted string} + +Quoted strings are used typically to give a filename (which may not +be a regular identifier). As before they are written between double +quotes ("). Unlike for V7, there is no escape character: characters +are written normally except the double quote which is doubled. + +\begin{transbox} +\TRANS{"abcd$\backslash\backslash$efg"}{"abcd$\backslash$efg"} +\TRANS{"abcd$\backslash$"efg"}{"abcd""efg"} +\end{transbox} + + +\subsection{Main changes in terms w.r.t. V7} + + +\subsubsection{Precedence of application} + +In the new syntax, parentheses are not really part of the syntax of +application. The precedence of application (10) is tighter than all +prefix and infix notations. It makes it possible to remove parentheses +in many contexts. 
+ +\begin{transbox} +\TRANS{(A x)->(f x)=(g y)}{A x -> f x = g y} +\TRANS{(f [x]x)}{f (fun x => x)} +\end{transbox} + + +\subsubsection{Arithmetics and scopes} + +The specialized notation for \TERM{Z} and \TERM{R} (introduced by +symbols \TERM{`} and \TERM{``}) have disappeared. They have been +replaced by the general notion of scope. + +\begin{center} +\begin{tabular}{l|l|l} +type & scope name & delimiter \\ +\hline +types & type_scope & \TERM{type} \\ +\TERM{bool} & bool_scope & \\ +\TERM{nat} & nat_scope & \TERM{nat} \\ +\TERM{Z} & Z_scope & \TERM{Z} \\ +\TERM{R} & R_scope & \TERM{R} \\ +\TERM{positive} & positive_scope & \TERM{P} +\end{tabular} +\end{center} + +In order to use notations of arithmetics on \TERM{Z}, its scope must +be opened with command \verb+Open Scope Z_scope.+ Another possibility +is using the scope change notation (\TERM{\%}). The latter notation is +to be used when notations of several scopes appear in the same +expression. + +In examples below, scope changes are not needed if the appropriate scope +has been opened. Scope \verb|nat_scope| is opened in the initial state of Coq. +\begin{transbox} +\TRANSCOM{`0+x=x+0`}{0+x=x+0}{\textrm{Z_scope}} +\TRANSCOM{``0 + [if b then ``1`` else ``2``]``}{0 + if b then 1 else 2}{\textrm{R_scope}} +\TRANSCOM{(0)}{0}{\textrm{nat_scope}} +\end{transbox} + +Below is a table that tells which notation is available in which +scope. The relative precedences and associativity of operators is the +same as in usual mathematics. See the reference manual for more +details. However, it is important to remember that unlike V7, the type +operators for product and sum are left-associative, in order not to +clash with arithmetic operators. 
+ +\begin{center} +\begin{tabular}{l|l} +scope & notations \\ +\hline +nat_scope & \texttt{+ - * < <= > >=} \\ +Z_scope & \texttt{+ - * / mod < <= > >= ?=} \\ +R_scope & \texttt{+ - * / < <= > >=} \\ +type_scope & \texttt{* +} \\ +bool_scope & \texttt{\&\& || -} \\ +list_scope & \texttt{:: ++} +\end{tabular} +\end{center} + + + +\subsubsection{Notation for implicit arguments} + +The explicitation of arguments is closer to the \emph{bindings} +notation in tactics. Argument positions follow the argument names of +the head constant. The example below assumes \verb+f+ is a function +with two implicit dependent arguments named \verb+x+ and \verb+y+. +\begin{transbox} +\TRANS{f 1!t1 2!t2 t3}{f (x:=t1) (y:=t2) t3} +\TRANS{!f t1 t2}{@f t1 t2} +\end{transbox} + + +\subsubsection{Inferred subterms} + +Subterms that can be automatically inferred by the type-checker is now +written {\tt _} + +\begin{transbox} +\TRANS{?}{_} +\end{transbox} + +\subsubsection{Universal quantification} + +The universal quantification and dependent product types are now +introduced by the \texttt{forall} keyword before the binders and a +comma after the binders. + +The syntax of binders also changed significantly. A binder can simply be +a name when its type can be inferred. In other cases, the name and the type +of the variable are put between parentheses. When several consecutive +variables have the same type, they can be grouped. Finally, if all variables +have the same type, parentheses can be omitted. + +\begin{transbox} +\TRANS{(x:A)B}{forall (x:~A), B ~~\textrm{or}~~ forall x:~A, B} +\TRANS{(x,y:nat)P}{forall (x y :~nat), P ~~\textrm{or}~~ forall x y :~nat, P} +\TRANS{(x,y:nat;z:A)P}{forall (x y :~nat) (z:A), P} +\TRANS{(x,y,z,t:?)P}{forall x y z t, P} +\TRANS{(x,y:nat;z:?)P}{forall (x y :~nat) z, P} +\end{transbox} + +\subsubsection{Abstraction} + +The notation for $\lambda$-abstraction follows that of universal +quantification. 
The binders are surrounded by keyword \texttt{fun} +and \verb+=>+. + +\begin{transbox} +\TRANS{[x,y:nat; z](f a b c)}{fun (x y:nat) z => f a b c} +\end{transbox} + + +\subsubsection{Pattern-matching} + +Beside the usage of the keyword pair \TERM{match}/\TERM{with} instead of +\TERM{Cases}/\TERM{of}, the main change is the notation for the type of +branches and return type. It is no longer written between \TERM{$<$ $>$} before +the \TERM{Cases} keyword, but interleaved with the destructured objects. + +The idea is that for each destructured object, one may specify a +variable name (after the \TERM{as} keyword) to tell how the branches +types depend on this destructured objects (case of a dependent +elimination), and also how they depend on the value of the arguments +of the inductive type of the destructured objects (after the \TERM{in} +keyword). The type of branches is then given after the keyword +\TERM{return}, unless it can be inferred. + +Moreover, when the destructured object is a variable, one may use this +variable in the return type. + +\begin{transbox} +\TRANS{Cases n of\\~~ O => O \\| (S k) => (1) end}{match n with\\~~ 0 => 0 \\| S k => 1 end} +\TRANS{Cases m n of \\~~0 0 => t \\| ... end}{match m, n with \\~~0, 0 => t \\| ... end} +\TRANS{<[n:nat](P n)>Cases T of ... end}{match T as n return P n with ... end} +\TRANS{<[n:nat][p:(even n)]\~{}(odd n)>Cases p of\\~~ ... \\end}{match p in even n return \~{} odd n with\\~~ ...\\end} +\end{transbox} + +The annotations of the special pattern-matching operators +(\TERM{if}/\TERM{then}/\TERM{else}) and \TERM{let()} also changed. The +only restriction is that the destructuring \TERM{let} does not allow +dependent case analysis. 
+
+\begin{transbox}
+\TRANS{
+  \begin{tabular}{@{}l}
+  <[n:nat;x:(I n)](P n x)>if t then t1 \\
+  else t2
+  \end{tabular}}%
+{\begin{tabular}{@{}l}
+  if t as x in I n return P n x then t1 \\
+  else t2
+  \end{tabular}}
+\TRANS{<[n:nat](P n)>let (p,q) = t1 in t2}%
+{let (p,q) in I n return P n := t1 in t2}
+\end{transbox}
+
+
+\subsubsection{Fixpoints and cofixpoints}
+
+A simpler syntax for non-mutual fixpoints is provided, making it very close
+to the usual notation for non-recursive functions. The decreasing argument
+is now indicated by an annotation between curly braces, regardless of the
+binders grouping. The annotation can be omitted if the binders introduce only
+one variable. The type of the result can be omitted if inferable.
+
+\begin{transbox}
+\TRANS{Fix plus\{plus [n:nat] : nat -> nat :=\\~~ [m]...\}}{fix plus (n m:nat) \{struct n\}: nat := ...}
+\TRANS{Fix fact\{fact [n:nat]: nat :=\\
+~~Cases n of\\~~~~ O => (1) \\~~| (S k) => (mult n (fact k)) end\}}{fix fact
+  (n:nat) :=\\
+~~match n with \\~~~~0 => 1 \\~~| (S k) => n * fact k end}
+\end{transbox}
+
+There is syntactic sugar for single fixpoints (defining one
+variable) associated to a local definition:
+
+\begin{transbox}
+\TRANS{let f := Fix f \{f [x:A] : T := M\} in\\(g (f y))}{let fix f (x:A) : T := M in\\g (f y)}
+\end{transbox}
+
+The same applies to cofixpoints; annotations are not allowed in that case.
+
+\subsubsection{Notation for type cast}
+
+\begin{transbox}
+\TRANS{O :: nat}{0 : nat}
+\end{transbox}
+
+\subsection{Main changes in tactics w.r.t. V7}
+
+The main change is that all tactic names are lowercase. This also holds for
+Ltac keywords.
+ +\subsubsection{Renaming of induction tactics} + +\begin{transbox} +\TRANS{NewDestruct}{destruct} +\TRANS{NewInduction}{induction} +\TRANS{Induction}{simple induction} +\TRANS{Destruct}{simple destruct} +\end{transbox} + +\subsubsection{Ltac} + +Definitions of macros are introduced by \TERM{Ltac} instead of +\TERM{Tactic Definition}, \TERM{Meta Definition} or \TERM{Recursive +Definition}. They are considered recursive by default. + +\begin{transbox} +\TRANS{Meta Definition my_tac t1 t2 := t1; t2.}% +{Ltac my_tac t1 t2 := t1; t2.} +\end{transbox} + +Rules of a match command are not between square brackets anymore. + +Context (understand a term with a placeholder) instantiation \TERM{inst} +became \TERM{context}. Syntax is unified with subterm matching. + +\begin{transbox} +\TRANS{Match t With [C[x=y]] -> Inst C[y=x]}% +{match t with context C[x=y] => context C[y=x] end} +\end{transbox} + +Arguments of macros use the term syntax. If a general Ltac expression +is to be passed, it must be prefixed with ``{\tt ltac :}''. In other +cases, when a \'{} was necessary, it is replaced by ``{\tt constr :}'' + +\begin{transbox} +\TRANS{my_tac '(S x)}{my_tac (S x)} +\TRANS{my_tac (Let x=tac In x)}{my_tac ltac:(let x:=tac in x)} +\TRANS{Let x = '[x](S (S x)) In Apply x}% +{let x := constr:(fun x => S (S x)) in apply x} +\end{transbox} + +{\tt Match Context With} is now called {\tt match goal with}. Its +argument is an Ltac expression by default. + + +\subsubsection{Named arguments of theorems ({\em bindings})} + +\begin{transbox} +\TRANS{Apply thm with x:=t 1:=u}{apply thm with (x:=t) (1:=u)} +\end{transbox} + + +\subsubsection{Occurrences} + +To avoid ambiguity between a numeric literal and the optional +occurrence numbers of this term, the occurrence numbers are put after +the term itself and after keyword \TERM{as}. 
+\begin{transbox}
+\TRANS{Pattern 1 2 (f x) 3 4 d y z}{pattern f x at 1 2, d at 3 4, y, z}
+\end{transbox}
+
+
+\subsubsection{{\tt LetTac} and {\tt Pose}}
+
+Tactic {\tt LetTac} was renamed into {\tt set}, and tactic {\tt Pose}
+was a particular case of {\tt LetTac} where the abbreviation is folded
+in the conclusion\footnote{There is a tactic called {\tt pose} in V8,
+but its behaviour is not to fold the abbreviation at all.}.
+
+\begin{transbox}
+\TRANS{LetTac x = t in H}{set (x := t) in H}
+\TRANS{Pose x := t}{set (x := t)}
+\end{transbox}
+
+{\tt LetTac} could be followed by a specification (called a clause) of
+the places where the abbreviation had to be folded (hypotheses and/or
+conclusion). Clauses are the syntactic notion to denote in which parts
+of a goal a given transformation should occur. Its basic notation is
+either \TERM{*} (meaning everywhere), or {\tt\textrm{\em hyps} |-
+\textrm{\em concl}} where {\em hyps} is either \TERM{*} (to denote all
+the hypotheses), or a comma-separated list of either hypothesis names,
+or {\tt (value of $H$)} or {\tt (type of $H$)}. Moreover, occurrences
+can be specified after every hypothesis after the {\TERM{at}}
+keyword. {\em concl} is either empty or \TERM{*}, and can be followed
+by occurrences.
+
+\begin{transbox}
+\TRANS{in Goal}{in |- *}
+\TRANS{in H H1}{in H, H1 |-}
+\TRANS{in H H1 ...}{in * |-}
+\TRANS{in H H1 Goal}{in H, H1 |- *}
+\TRANS{in H H1 H2 ... Goal}{in *}
+\TRANS{in 1 2 H 3 4 H0 1 3 Goal}{in H at 1 2, H0 at 3 4 |- * at 1 3}
+\end{transbox}
+
+\subsection{Main changes in vernacular commands w.r.t. V7}
+
+
+\subsubsection{Require}
+
+The default behaviour of {\tt Require} is not to open the loaded
+module.
+
+\begin{transbox}
+\TRANS{Require Arith}{Require Import Arith}
+\end{transbox}
+
+\subsubsection{Binders}
+
+The binders of vernacular commands changed in the same way as those of
+fixpoints. This also holds for parameters of inductive definitions.
+ + +\begin{transbox} +\TRANS{Definition x [a:A] : T := M}{Definition x (a:A) : T := M} +\TRANS{Inductive and [A,B:Prop]: Prop := \\~~conj : A->B->(and A B)}% + {Inductive and (A B:Prop): Prop := \\~~conj : A -> B -> and A B} +\end{transbox} + +\subsubsection{Hints} + +Both {\tt Hints} and {\tt Hint} commands are beginning with {\tt Hint}. + +Command {\tt HintDestruct} has disappeared. + + +The syntax of \emph{Extern} hints changed: the pattern and the tactic +to be applied are separated by a {\tt =>}. +\begin{transbox} +\TRANS{Hint name := Resolve (f ? x)}% +{Hint Resolve (f _ x)} +\TRANS{Hint name := Extern 4 (toto ?) Apply lemma}% +{Hint Extern 4 (toto _) => apply lemma} +\TRANS{Hints Resolve x y z}{Hint Resolve x y z} +\TRANS{Hints Resolve f : db1 db2}{Hint Resolve f : db1 db2} +\TRANS{Hints Immediate x y z}{Hint Immediate x y z} +\TRANS{Hints Unfold x y z}{Hint Unfold x y z} +%% \TRANS{\begin{tabular}{@{}l} +%% HintDestruct Local Conclusion \\ +%% ~~name (f ? ?) 3 [Apply thm] +%% \end{tabular}}% +%% {\begin{tabular}{@{}l} +%% Hint Local Destuct name := \\ +%% ~~3 Conclusion (f _ _) => apply thm +%% \end{tabular}} +\end{transbox} + + +\subsubsection{Implicit arguments} + + +{\tt Set Implicit Arguments} changed its meaning in V8: the default is +to turn implicit only the arguments that are {\em strictly} implicit +(or rigid), i.e. that remains inferable whatever the other arguments +are. For instance {\tt x} inferable from {\tt P x} is not strictly +inferable since it can disappears if {\tt P} is instanciated by a term +which erases {\tt x}. + +\begin{transbox} +\TRANS{Set Implicit Arguments}% +{\begin{tabular}{l} + Set Implicit Arguments. \\ + Unset Strict Implicits. + \end{tabular}} +\end{transbox} + +However, you may wish to adopt the new semantics of {\tt Set Implicit +Arguments} (for instance because you think that the choice of +arguments it sets implicit is more ``natural'' for you). 
+
+
+\subsection{Changes in standard library}
+
+Many lemmas had their names changed to improve uniformity. The user
+generally does not have to care since the translator performs the
+renaming.
+
+ Type {\tt entier} from fast_integer.v is renamed into {\tt N} by the
+translator. As a consequence, user-defined objects of same name {\tt N}
+are systematically qualified even though it may not be necessary. The
+following table lists the main names with which the same problem
+arises:
+\begin{transbox}
+\TRANS{IF}{IF_then_else}
+\TRANS{ZERO}{Z0}
+\TRANS{POS}{Zpos}
+\TRANS{NEG}{Zneg}
+\TRANS{SUPERIEUR}{Gt}
+\TRANS{EGAL}{Eq}
+\TRANS{INFERIEUR}{Lt}
+\TRANS{add}{Pplus}
+\TRANS{true_sub}{Pminus}
+\TRANS{entier}{N}
+\TRANS{Un_suivi_de}{Ndouble_plus_one}
+\TRANS{Zero_suivi_de}{Ndouble}
+\TRANS{Nul}{N0}
+\TRANS{Pos}{Npos}
+\end{transbox}
+
+
+\subsubsection{Implicit arguments}
+
+%% Hugo:
+Main definitions of the standard library now have implicit
+arguments. These arguments are dropped in the translated files. This
+can exceptionally be a source of incompatibilities which has to be
+solved by hand (it typically happens for polymorphic functions applied
+to {\tt nil} or {\tt None}).
+%% preciser: avant ou apres trad ?
+
+\subsubsection{Logic about {\tt Type}}
+
+Many notations that applied to {\tt Set} have been extended to {\tt
+Type}, so several definitions in {\tt Type} are superseded by them.
+
+\begin{transbox}
+\TRANS{x==y}{x=y}
+\TRANS{(EXT x:Prop | Q)}{exists x:Prop, Q}
+\TRANS{identityT}{identity}
+\end{transbox}
+
+
+
+%% Doc of the translator
+\section{A guide to translation}
+\label{Translation}
+
+%%\subsection{Overview of the translation process}
+
+Here is a short description of the tools involved in the translation process:
+\begin{description}
+\item{\tt coqc -translate}
+is the automatic translator. It is a parser/pretty-printer. This means
+that the translation is made by parsing every command using a parser
+of old syntax, which is printed using the new syntax.
Many efforts +were made to preserve as much as possible of the quality of the +presentation: it avoids expansion of syntax extensions, comments are +not discarded and placed at the same place. +\item{\tt translate-v8} (in the translation package) is a small +shell-script that will help translate developments that compile with a +Makefile with minimum requirements. +\end{description} + +\subsection{Preparation to translation} + +This step is very important because most of work shall be done before +translation. If a problem occurs during translation, it often means +that you will have to modify the original source and restart the +translation process. This also means that it is recommended not to +edit the output of the translator since it would be overwritten if +the translation has to be restarted. + +\subsubsection{Compilation with {\tt coqc -v7}} + +First of all, it is mandatory that files compile with the current +version of Coq (8.0) with option {\tt -v7}. Translation is a +complicated task that involves the full compilation of the +development. If your development was compiled with older versions, +first upgrade to Coq V8.0 with option {\tt -v7}. If you use a Makefile +similar to those produced by {\tt coq\_makefile}, you probably just +have to do + +{\tt make OPT="-opt -v7"} ~~~or~~~ {\tt make OPT="-byte -v7"} + +When the development compiles successfully, there are several changes +that might be necessary for the translation. Essentially, this is +about syntax extensions (see section below dedicated to porting syntax +extensions). If you do not use such features, then you are ready to +try and make the translation. 
+
+\subsection{Translation}
+
+\subsubsection{The general case}
+
+The preferred way is to use script {\tt translate-v8} if your development
+is compiled by a Makefile with the following constraints:
+\begin{itemize}
+\item compilation is achieved by invoking make without specifying a target
+\item options are passed to Coq with make variable COQFLAGS that
+  includes variables OPT, COQLIBS, OTHERFLAGS and COQ_XML.
+\end{itemize}
+These constraints are met by the makefiles produced by {\tt coq\_makefile}.
+
+Otherwise, modify your build program so as to pass option {\tt
+-translate} to program {\tt coqc}. The effect of this option is to
+output the translated source of any {\tt .v} file in a file with
+extension {\tt .v8} located in the same directory as the original
+file.
+
+\subsubsection{What may happen during the translation}
+
+This section describes events that may happen during the
+translation and measures to adopt.
+
+These are the warnings that may arise during the translation, but they
+generally do not require any modification for the user:
+Warnings:
+\begin{itemize}
+\item {\tt Unable to detect if $id$ denotes a local definition}\\
+This is due to a semantic change in clauses. In a command such as {\tt
+simpl in H}, the old semantics were to perform simplification in the
+type of {\tt H}, or in its body if it is defined. With the new
+semantics, it is performed both in the type and the body (if any). It
+might lead to incompatibilities.
+
+\item {\tt Forgetting obsolete module}\\
+Some modules have disappeared in V8.0 (new syntax). The user does not
+need to worry about it, since the translator deals with it.
+
+\item {\tt Replacing obsolete module}\\
+Same as before but with the modules that were renamed. Here again, the
+translator deals with it.
+\end{itemize}
+
+\subsection{Verification of the translation}
+
+The shell-script {\tt translate-v8} also renames {\tt .v8} files into
+{\tt .v} files (older {\tt .v} files are put in a subdirectory called
+{\tt v7}) and tries to recompile them. To do so it invokes {\tt make}
+without option (which should cause the compilation using {\tt coqc}
+without particular option).
+
+If compilation fails at this stage, you should refrain from repairing
+errors manually on the new syntax, but rather modify the old syntax
+script and restart the translation. We insist on that because the
+problem encountered can show up in many instances (especially if the
+problem comes from a syntactic extension), and fixing the original
+sources (for instance the {\tt V8only} parts of notations) once will
+solve all occurrences of the problem.
+
+%%\subsubsection{Errors occurring after translation}
+%%Equality in {\tt Z} or {\tt R}...
+
+\subsection{Particular cases}
+
+\subsubsection{Lexical conventions}
+
+The definition of identifiers changed. Most of those changes are
+handled by the translator. They include:
+\begin{itemize}
+\item {\tt \_} is not an identifier anymore: it is translated to {\tt
+x\_}
+\item avoid clash with new keywords by adding a trailing {\tt \_}
+\end{itemize}
+
+If the choices made by translation are not satisfactory
+or in the following cases:
+\begin{itemize}
+\item use of latin letters
+\item use of iso-latin characters in notations
+\end{itemize}
+the user should change his development prior to translation.
+
+\subsubsection{{\tt Case} and {\tt Match}}
+
+These very low-level case analyses are no longer supported. The
+translator tries hard to translate them into a user-friendly one, but
+it might lack type information to do so\footnote{The translator tries
+to typecheck terms before printing them, but it is not always possible
+to determine the context in which terms appearing in tactics
+live.}.
If this happens, it is preferable to transform it manually +before translation. + +\subsubsection{Syntax extensions with {\tt Grammar} and {\tt Syntax}} + + +{\tt Grammar} and {\tt Syntax} are no longer supported. They +should be replaced by an equivalent {\tt Notation} command and be +processed as described above. Before attempting translation, users +should verify that compilation with option {\tt -v7} succeeds. + +In the cases where {\tt Grammar} and {\tt Syntax} cannot be emulated +by {\tt Notation}, users have to change manually they development as +they wish to avoid the use of {\tt Grammar}. If this is not done, the +translator will simply expand the notations and the output of the +translator will use the regular Coq syntax. + +\subsubsection{Syntax extensions with {\tt Notation} and {\tt Infix}} + +These commands do not necessarily need to be changed. + +Some work will have to be done manually if the notation conflicts with +the new syntax (for instance, using keywords like {\tt fun} or {\tt +exists}, overloading of symbols of the old syntax, etc.) or if the +precedences are not right. + + Precedence levels are now from 0 to 200. In V8, the precedence and +associativity of an operator cannot be redefined. Typical level are +(refer to the chapter on notations in the Reference Manual for the +full list): + +\begin{center} +\begin{tabular}{|cll|} +\hline +Notation & Precedence & Associativity \\ +\hline +\verb!_ <-> _! & 95 & no \\ +\verb!_ \/ _! & 85 & right \\ +\verb!_ /\ _! & 80 & right \\ +\verb!~ _! & 75 & right \\ +\verb!_ = _!, \verb!_ <> _!, \verb!_ < _!, \verb!_ > _!, + \verb!_ <= _!, \verb!_ >= _! & 70 & no \\ +\verb!_ + _!, \verb!_ - _! & 50 & left \\ +\verb!_ * _!, \verb!_ / _! & 40 & left \\ +\verb!- _! & 35 & right \\ +\verb!_ ^ _! 
& 30 & left \\ +\hline +\end{tabular} +\end{center} + + + By default, the translator keeps the associativity given in V7 while +the levels are mapped according to the following table: + +\begin{center} +\begin{tabular}{l|l|l} +V7 level & mapped to & associativity \\ +\hline +0 & 0 & no \\ +1 & 20 & left \\ +2 & 30 & right \\ +3 & 40 & left \\ +4 & 50 & left \\ +5 & 70 & no \\ +6 & 80 & right \\ +7 & 85 & right \\ +8 & 90 & right \\ +9 & 95 & no \\ +10 & 100 & left +\end{tabular} +\end{center} + +If this is OK, just simply apply the translator. + + +\paragraph{Associativity conflict} + + Since the associativity of the levels obtained by translating a V7 +level (as shown on table above) cannot be changed, you have to choose +another level with a compatible associativity. + + You can choose any level between 0 and 200, knowing that the +standard operators are already set at the levels shown on the list +above. + +Assume you have a notation +\begin{verbatim} +Infix NONA 2 "=_S" my_setoid_eq. +\end{verbatim} +By default, the translator moves it to level 30 which is right +associative, hence a conflict with the expected no associativity. + +To solve the problem, just add the "V8only" modifier to reset the +level and enforce the associativity as follows: +\begin{verbatim} +Infix NONA 2 "=_S" my_setoid_eq V8only (at level 70, no associativity). +\end{verbatim} +The translator now knows that it has to translate "=_S" at level 70 +with no associativity. + +Remark: 70 is the "natural" level for relations, hence the choice of 70 +here, but any other level accepting a no-associativity would have been +OK. + +Second example: assume you have a notation +\begin{verbatim} +Infix RIGHTA 1 "o" my_comp. +\end{verbatim} +By default, the translator moves it to level 20 which is left +associative, hence a conflict with the expected right associativity. 
+ +To solve the problem, just add the "V8only" modifier to reset the +level and enforce the associativity as follows: +\begin{verbatim} +Infix RIGHTA 1 "o" my_comp V8only (at level 20, right associativity). +\end{verbatim} +The translator now knows that it has to translate "o" at level 20 +which has the correct "right associativity". + +Remark: we assumed here that the user wants a strong precedence for +composition, in such a way, say, that "f o g + h" is parsed as +"(f o g) + h". To get "o" binding less than the arithmetical operators, +an appropriated level would have been close of 70, and below, e.g. 65. + + +\paragraph{Conflict: notation hides another notation} + +Remark: use {\tt Print Grammar constr} in V8 to diagnose the overlap +and see the section on factorization in the chapter on notations of +the Reference Manual for hints on how to factorize. + +Example: +\begin{verbatim} +Notation "{ x }" := (my_embedding x) (at level 1). +\end{verbatim} +overlaps in V8 with notation \verb#{ x : A & P }# at level 0 and with +x at level 99. The conflicts can be solved by left-factorizing the +notation as follows: +\begin{verbatim} +Notation "{ x }" := (my_embedding x) (at level 1) + V8only (at level 0, x at level 99). +\end{verbatim} + +\paragraph{Conflict: a notation conflicts with the V8 grammar} + +Again, use the {\tt V8only} modifier to tell the translator to +automatically take in charge the new syntax. + +Example: +\begin{verbatim} +Infix 3 "@" app. +\end{verbatim} +Since {\tt @} is used in the new syntax for deactivating the implicit +arguments, another symbol has to be used, e.g. {\tt @@}. This is done via +the {\tt V8only} option as follows: +\begin{verbatim} +Infix 3 "@" app V8only "@@" (at level 40, left associativity). +\end{verbatim} +or, alternatively by +\begin{verbatim} +Notation "x @ y" := (app x y) (at level 3, left associativity) + V8only "x @@ y" (at level 40, left associativity). 
+\end{verbatim} + +\paragraph{Conflict: my notation is already defined at another level + (or with another associativity)} + +In V8, the level and associativity of a given notation can no longer +be changed. Then, either you adopt the standard reserved levels and +associativity for this notation (as given on the list above) or you +change your notation. +\begin{itemize} +\item To change the notation, follow the directions in the previous +paragraph +\item To adopt the standard level, just use {\tt V8only} without any +argument. +\end{itemize} + +Example: +\begin{verbatim} +Infix 6 "*" my_mult. +\end{verbatim} +is not accepted as such in V8. Write +\begin{verbatim} +Infix 6 "*" my_mult V8only. +\end{verbatim} +to tell the translator to use {\tt *} at the reserved level (i.e. 40 +with left associativity). Even better, use interpretation scopes (look +at the Reference Manual). + + +\subsubsection{Strict implicit arguments} + +In the case you want to adopt the new semantics of {\tt Set Implicit + Arguments} (only setting rigid arguments as implicit), add the option +{\tt -strict-implicit} to the translator. + +Warning: changing the number of implicit arguments can break the +notations. Then use the {\tt V8only} modifier of {\tt Notation}. + +\end{document} diff --git a/doc/tutorial/Tutorial.tex b/doc/tutorial/Tutorial.tex new file mode 100755 index 00000000..7c840509 --- /dev/null +++ b/doc/tutorial/Tutorial.tex @@ -0,0 +1,1584 @@ +\documentclass[11pt,a4paper]{book} +\usepackage[T1]{fontenc} +\usepackage[latin1]{inputenc} +\usepackage{pslatex} + +\input{../common/version.tex} +\input{../common/macros.tex} +\input{../common/title.tex} + +%\makeindex + +\begin{document} +\coverpage{A Tutorial}{Gérard Huet, Gilles Kahn and Christine Paulin-Mohring}{} + +%\tableofcontents + +\chapter*{Getting started} + +\Coq\ is a Proof Assistant for a Logical Framework known as the Calculus +of Inductive Constructions. 
It allows the interactive construction of +formal proofs, and also the manipulation of functional programs +consistently with their specifications. It runs as a computer program +on many architectures. +%, and mainly on Unix machines. +It is available with a variety of user interfaces. The present +document does not attempt to present a comprehensive view of all the +possibilities of \Coq, but rather to present in the most elementary +manner a tutorial on the basic specification language, called Gallina, +in which formal axiomatisations may be developed, and on the main +proof tools. For more advanced information, the reader could refer to +the \Coq{} Reference Manual or the \textit{Coq'Art}, a new book by Y. +Bertot and P. Castéran on practical uses of the \Coq{} system. + +We assume here that the potential user has installed \Coq~ on his workstation, +that he calls \Coq~ from a standard teletype-like shell window, and that +he does not use any special interface. +Instructions on installation procedures, as well as more comprehensive +documentation, may be found in the standard distribution of \Coq, +which may be obtained from \Coq{} web site \texttt{http://coq.inria.fr}. + +In the following, all examples preceded by the prompting sequence +\verb:Coq < : represent user input, terminated by a period. The +following lines usually show \Coq's answer as it appears on the users +screen. The sequence of such examples is a valid \Coq~ session, unless +otherwise specified. This version of the tutorial has been prepared +on a PC workstation running Linux. +The standard invocation of \Coq\ delivers a message such as: + +\begin{small} +\begin{flushleft} +\begin{verbatim} +unix:~> coqtop +Welcome to Coq 8.0 (Mar 2004) + +Coq < +\end{verbatim} +\end{flushleft} +\end{small} + +The first line gives a banner stating the precise version of \Coq~ +used. 
You should always return this banner when you report an +anomaly to our hot-line \verb:coq-bugs@pauillac.inria.fr: or on our +bug-tracking system~:\verb:http://coq.inria.fr/bin/coq-bugs: + +\chapter{Basic Predicate Calculus} + +\section{An overview of the specification language Gallina} + +A formal development in Gallina consists in a sequence of {\sl declarations} +and {\sl definitions}. You may also send \Coq~ {\sl commands} which are +not really part of the formal development, but correspond to information +requests, or service routine invocations. For instance, the command: +\begin{verbatim} +Coq < Quit. +\end{verbatim} +terminates the current session. + +\subsection{Declarations} + +A declaration associates a {\sl name} with +a {\sl specification}. +A name corresponds roughly to an identifier in a programming +language, i.e. to a string of letters, digits, and a few ASCII symbols like +underscore (\verb"_") and prime (\verb"'"), starting with a letter. +We use case distinction, so that the names \verb"A" and \verb"a" are distinct. +Certain strings are reserved as key-words of \Coq, and thus are forbidden +as user identifiers. + +A specification is a formal expression which classifies the notion which is +being declared. There are basically three kinds of specifications: +{\sl logical propositions}, {\sl mathematical collections}, and +{\sl abstract types}. They are classified by the three basic sorts +of the system, called respectively \verb:Prop:, \verb:Set:, and +\verb:Type:, which are themselves atomic abstract types. + +Every valid expression $e$ in Gallina is associated with a specification, +itself a valid expression, called its {\sl type} $\tau(E)$. We write +$e:\tau(E)$ for the judgment that $e$ is of type $E$. +%CP Le role de \tau n'est pas clair. +You may request \Coq~ to return to you the type of a valid expression by using +the command \verb:Check:: + +\begin{coq_example} +Check O. 
+\end{coq_example} + +Thus we know that the identifier \verb:O: (the name `O', not to be +confused with the numeral `0' which is not a proper identifier!) is +known in the current context, and that its type is the specification +\verb:nat:. This specification is itself classified as a mathematical +collection, as we may readily check: + +\begin{coq_example} +Check nat. +\end{coq_example} + +The specification \verb:Set: is an abstract type, one of the basic +sorts of the Gallina language, whereas the notions $nat$ and $O$ are +notions which are defined in the arithmetic prelude, +automatically loaded when running the \Coq\ system. + +We start by introducing a so-called section name. The role of sections +is to structure the modelisation by limiting the scope of parameters, +hypotheses and definitions. It will also give a convenient way to +reset part of the development. + +\begin{coq_example} +Section Declaration. +\end{coq_example} +With what we already know, we may now enter in the system a declaration, +corresponding to the informal mathematics {\sl let n be a natural + number}. + +\begin{coq_example} +Variable n : nat. +\end{coq_example} + +If we want to translate a more precise statement, such as +{\sl let n be a positive natural number}, +we have to add another declaration, which will declare explicitly the +hypothesis \verb:Pos_n:, with specification the proper logical +proposition: +\begin{coq_example} +Hypothesis Pos_n : (gt n 0). +\end{coq_example} + +Indeed we may check that the relation \verb:gt: is known with the right type +in the current context: + +\begin{coq_example} +Check gt. +\end{coq_example} + +which tells us that \verb:gt: is a function expecting two arguments of +type \verb:nat: in order to build a logical proposition. 
+What happens here is similar to what we are used to in a functional +programming language: we may compose the (specification) type \verb:nat: +with the (abstract) type \verb:Prop: of logical propositions through the +arrow function constructor, in order to get a functional type +\verb:nat->Prop:: +\begin{coq_example} +Check (nat -> Prop). +\end{coq_example} +which may be composed again with \verb:nat: in order to obtain the +type \verb:nat->nat->Prop: of binary relations over natural numbers. +Actually \verb:nat->nat->Prop: is an abbreviation for +\verb:nat->(nat->Prop):. + +Functional notions may be composed in the usual way. An expression $f$ +of type $A\ra B$ may be applied to an expression $e$ of type $A$ in order +to form the expression $(f~e)$ of type $B$. Here we get that +the expression \verb:(gt n): is well-formed of type \verb:nat->Prop:, +and thus that the expression \verb:(gt n O):, which abbreviates +\verb:((gt n) O):, is a well-formed proposition. +\begin{coq_example} +Check gt n O. +\end{coq_example} + +\subsection{Definitions} + +The initial prelude contains a few arithmetic definitions: +\verb:nat: is defined as a mathematical collection (type \verb:Set:), constants +\verb:O:, \verb:S:, \verb:plus:, are defined as objects of types +respectively \verb:nat:, \verb:nat->nat:, and \verb:nat->nat->nat:. +You may introduce new definitions, which link a name to a well-typed value. +For instance, we may introduce the constant \verb:one: as being defined +to be equal to the successor of zero: +\begin{coq_example} +Definition one := (S O). +\end{coq_example} +We may optionally indicate the required type: +\begin{coq_example} +Definition two : nat := S one. +\end{coq_example} + +Actually \Coq~ allows several possible syntaxes: +\begin{coq_example} +Definition three : nat := S two. 
+\end{coq_example} + +Here is a way to define the doubling function, which expects an +argument \verb:m: of type \verb:nat: in order to build its result as +\verb:(plus m m):: + +\begin{coq_example} +Definition double (m:nat) := plus m m. +\end{coq_example} +This definition introduces the constant \texttt{double} defined as the +expression \texttt{fun m:nat => plus m m}. +The abstraction introduced by \texttt{fun} is explained as follows. The expression +\verb+fun x:A => e+ is well formed of type \verb+A->B+ in a context +whenever the expression \verb+e+ is well-formed of type \verb+B+ in +the given context to which we add the declaration that \verb+x+ +is of type \verb+A+. Here \verb+x+ is a bound, or dummy variable in +the expression \verb+fun x:A => e+. For instance we could as well have +defined \verb:double: as \verb+fun n:nat => (plus n n)+. + +Bound (local) variables and free (global) variables may be mixed. +For instance, we may define the function which adds the constant \verb:n: +to its argument as +\begin{coq_example} +Definition add_n (m:nat) := plus m n. +\end{coq_example} +However, note that here we may not rename the formal argument $m$ into $n$ +without capturing the free occurrence of $n$, and thus changing the meaning +of the defined notion. + +Binding operations are well known for instance in logic, where they +are called quantifiers. Thus we may universally quantify a +proposition such as $m>0$ in order to get a universal proposition +$\forall m\cdot m>0$. Indeed this operator is available in \Coq, with +the following syntax: \verb+forall m:nat, gt m O+. Similarly to the +case of the functional abstraction binding, we are obliged to declare +explicitly the type of the quantified variable. We check: +\begin{coq_example} +Check (forall m:nat, gt m 0). +\end{coq_example} +We may clean-up the development by removing the contents of the +current section: +\begin{coq_example} +Reset Declaration. 
+\end{coq_example}
+
+\section{Introduction to the proof engine: Minimal Logic}
+
+In the following, we are going to consider various propositions, built
+from atomic propositions $A, B, C$. This may be done easily, by
+introducing these atoms as global variables declared of type \verb:Prop:.
+It is easy to declare several names with the same specification:
+\begin{coq_example}
+Section Minimal_Logic.
+Variables A B C : Prop.
+\end{coq_example}
+
+We shall consider simple implications, such as $A\ra B$, read as
+``$A$ implies $B$''. Remark that we overload the arrow symbol, which
+has been used above as the functionality type constructor, and which
+may be used as well as propositional connective:
+\begin{coq_example}
+Check (A -> B).
+\end{coq_example}
+
+Let us now embark on a simple proof. We want to prove the easy tautology
+$(A\ra (B\ra C))\ra (A\ra B)\ra (A\ra C)$.
+We enter the proof engine by the command
+\verb:Goal:, followed by the conjecture we want to verify:
+\begin{coq_example}
+Goal (A -> B -> C) -> (A -> B) -> A -> C.
+\end{coq_example}
+
+The system displays the current goal below a double line, local hypotheses
+(there are none initially) being displayed above the line. We call
+the combination of local hypotheses with a goal a {\sl judgment}.
+%The new prompt \verb:Unnamed_thm <: indicates that.
+We are now in an inner
+loop of the system, in proof mode.
+New commands are available in this
+mode, such as {\sl tactics}, which are proof combining primitives.
+A tactic operates on the current goal by attempting to construct a proof
+of the corresponding judgment, possibly from proofs of some
+hypothetical judgments, which are then added to the current
+list of conjectured judgments.
+For instance, the \verb:intro: tactic is applicable to any judgment
+whose goal is an implication, by moving the proposition to the left
+of the implication to the list of local hypotheses:
+\begin{coq_example}
+intro H.
+\end{coq_example} + +%{\bf Warning} to users of \Coq~ previous versions: The display of a sequent in +%older versions of \Coq~ is inverse of this convention: the goal is displayed +%above the double line, the hypotheses below. + +Several introductions may be done in one step: +\begin{coq_example} +intros H' HA. +\end{coq_example} + +We notice that $C$, the current goal, may be obtained from hypothesis +\verb:H:, provided the truth of $A$ and $B$ are established. +The tactic \verb:apply: implements this piece of reasoning: +\begin{coq_example} +apply H. +\end{coq_example} + +We are now in the situation where we have two judgments as conjectures +that remain to be proved. Only the first is listed in full, for the +others the system displays only the corresponding subgoal, without its +local hypotheses list. Remark that \verb:apply: has kept the local +hypotheses of its father judgment, which are still available for +the judgments it generated. + +In order to solve the current goal, we just have to notice that it is +exactly available as hypothesis $HA$: +\begin{coq_example} +exact HA. +\end{coq_example} + +Now $H'$ applies: +\begin{coq_example} +apply H'. +\end{coq_example} + +And we may now conclude the proof as before, with \verb:exact HA.: +Actually, we may not bother with the name \verb:HA:, and just state that +the current goal is solvable from the current local assumptions: +\begin{coq_example} +assumption. +\end{coq_example} + +The proof is now finished. We may either discard it, by using the +command \verb:Abort: which returns to the standard \Coq~ toplevel loop +without further ado, or else save it as a lemma in the current context, +under name say \verb:trivial_lemma:: +\begin{coq_example} +Save trivial_lemma. +\end{coq_example} + +As a comment, the system shows the proof script listing all tactic +commands used in the proof. % ligne blanche apres exact HA?? + +Let us redo the same proof with a few variations. 
First of all we may name +the initial goal as a conjectured lemma: +\begin{coq_example} +Lemma distr_impl : (A -> B -> C) -> (A -> B) -> A -> C. +\end{coq_example} + +%{\bf Warning} to users of \Coq~ older versions: In order to enter the proof +%engine, at this point a dummy \verb:Goal.: command had to be typed in. + +Next, we may omit the names of local assumptions created by the introduction +tactics, they can be automatically created by the proof engine as new +non-clashing names. +\begin{coq_example} +intros. +\end{coq_example} + +The \verb:intros: tactic, with no arguments, effects as many individual +applications of \verb:intro: as is legal. + +Then, we may compose several tactics together in sequence, or in parallel, +through {\sl tacticals}, that is tactic combinators. The main constructions +are the following: +\begin{itemize} +\item $T_1 ; T_2$ (read $T_1$ then $T_2$) applies tactic $T_1$ to the current +goal, and then tactic $T_2$ to all the subgoals generated by $T_1$. +\item $T; [T_1 | T_2 | ... | T_n]$ applies tactic $T$ to the current +goal, and then tactic $T_1$ to the first newly generated subgoal, +..., $T_n$ to the nth. +\end{itemize} + +We may thus complete the proof of \verb:distr_impl: with one composite tactic: +\begin{coq_example} +apply H; [ assumption | apply H0; assumption ]. +\end{coq_example} + +Let us now save lemma \verb:distr_impl:: +\begin{coq_example} +Save. +\end{coq_example} + +Here \verb:Save: needs no argument, since we gave the name \verb:distr_impl: +in advance; +it is however possible to override the given name by giving a different +argument to command \verb:Save:. + +Actually, such an easy combination of tactics \verb:intro:, \verb:apply: +and \verb:assumption: may be found completely automatically by an automatic +tactic, called \verb:auto:, without user guidance: +\begin{coq_example} +Lemma distr_imp : (A -> B -> C) -> (A -> B) -> A -> C. +auto. 
+\end{coq_example} + +This time, we do not save the proof, we just discard it with the \verb:Abort: +command: + +\begin{coq_example} +Abort. +\end{coq_example} + +At any point during a proof, we may use \verb:Abort: to exit the proof mode +and go back to Coq's main loop. We may also use \verb:Restart: to restart +from scratch the proof of the same lemma. We may also use \verb:Undo: to +backtrack one step, and more generally \verb:Undo n: to +backtrack n steps. + +We end this section by showing a useful command, \verb:Inspect n.:, +which inspects the global \Coq~ environment, showing the last \verb:n: declared +notions: % Attention ici ?? +\begin{coq_example} +Inspect 3. +\end{coq_example} + +The declarations, whether global parameters or axioms, are shown preceded by +\verb:***:; definitions and lemmas are stated with their specification, but +their value (or proof-term) is omitted. + +\section{Propositional Calculus} + +\subsection{Conjunction} + +We have seen how \verb:intro: and \verb:apply: tactics could be combined +in order to prove implicational statements. More generally, \Coq~ favors a style +of reasoning, called {\sl Natural Deduction}, which decomposes reasoning into +so called {\sl introduction rules}, which tell how to prove a goal whose main +operator is a given propositional connective, and {\sl elimination rules}, +which tell how to use an hypothesis whose main operator is the propositional +connective. Let us show how to use these ideas for the propositional connectives +\verb:/\: and \verb:\/:. + +\begin{coq_example} +Lemma and_commutative : A /\ B -> B /\ A. +intro. +\end{coq_example} + +We make use of the conjunctive hypothesis \verb:H: with the \verb:elim: tactic, +which breaks it into its components: +\begin{coq_example} +elim H. +\end{coq_example} + +We now use the conjunction introduction tactic \verb:split:, which splits the +conjunctive goal into the two subgoals: +\begin{coq_example} +split. 
+\end{coq_example} + +and the proof is now trivial. Indeed, the whole proof is obtainable as follows: +\begin{coq_example} +Restart. +intro H; elim H; auto. +Qed. +\end{coq_example} + +The tactic \verb:auto: succeeded here because it knows as a hint the +conjunction introduction operator \verb+conj+ +\begin{coq_example} +Check conj. +\end{coq_example} + +Actually, the tactic \verb+Split+ is just an abbreviation for \verb+apply conj.+ + +What we have just seen is that the \verb:auto: tactic is more powerful than +just a simple application of local hypotheses; it tries to apply as well +lemmas which have been specified as hints. A +\verb:Hint Resolve: command registers a +lemma as a hint to be used from now on by the \verb:auto: tactic, whose power +may thus be incrementally augmented. + +\subsection{Disjunction} + +In a similar fashion, let us consider disjunction: + +\begin{coq_example} +Lemma or_commutative : A \/ B -> B \/ A. +intro H; elim H. +\end{coq_example} + +Let us prove the first subgoal in detail. We use \verb:intro: in order to +be left to prove \verb:B\/A: from \verb:A:: + +\begin{coq_example} +intro HA. +\end{coq_example} + +Here the hypothesis \verb:H: is not needed anymore. We could choose to +actually erase it with the tactic \verb:clear:; in this simple proof it +does not really matter, but in bigger proof developments it is useful to +clear away unnecessary hypotheses which may clutter your screen. +\begin{coq_example} +clear H. +\end{coq_example} + +The disjunction connective has two introduction rules, since \verb:P\/Q: +may be obtained from \verb:P: or from \verb:Q:; the two corresponding +proof constructors are called respectively \verb:or_introl: and +\verb:or_intror:; they are applied to the current goal by tactics +\verb:left: and \verb:right: respectively. For instance: +\begin{coq_example} +right. +trivial. 
+\end{coq_example} +The tactic \verb:trivial: works like \verb:auto: with the hints +database, but it only tries those tactics that can solve the goal in one +step. + +As before, all these tedious elementary steps may be performed automatically, +as shown for the second symmetric case: + +\begin{coq_example} +auto. +\end{coq_example} + +However, \verb:auto: alone does not succeed in proving the full lemma, because +it does not try any elimination step. +It is a bit disappointing that \verb:auto: is not able to prove automatically +such a simple tautology. The reason is that we want to keep +\verb:auto: efficient, so that it is always effective to use. + +\subsection{Tauto} + +A complete tactic for propositional +tautologies is indeed available in \Coq~ as the \verb:tauto: tactic. +%In order to get this facility, we have to import a library module +%called ``Dyckhoff'': +\begin{coq_example} +Restart. +tauto. +Qed. +\end{coq_example} + +It is possible to inspect the actual proof tree constructed by \verb:tauto:, +using a standard command of the system, which prints the value of any notion +currently defined in the context: +\begin{coq_example} +Print or_commutative. +\end{coq_example} + +It is not easy to understand the notation for proof terms without a few +explanations. The \texttt{fun} prefix, such as \verb+fun H:A\/B =>+, +corresponds +to \verb:intro H:, whereas a subterm such as +\verb:(or_intror: \verb:B H0): +corresponds to the sequence \verb:apply or_intror; exact H0:. +The generic combinator \verb:or_intror: needs to be instantiated by +the two properties \verb:B: and \verb:A:. Because \verb:A: can be +deduced from the type of \verb:H0:, only \verb:B: is printed. +The two instantiations are effected automatically by the tactic +\verb:apply: when pattern-matching a goal. The specialist will of course +recognize our proof term as a $\lambda$-term, used as notation for the +natural deduction proof term through the Curry-Howard isomorphism. 
The +naive user of \Coq~ may safely ignore these formal details. + +Let us exercise the \verb:tauto: tactic on a more complex example: +\begin{coq_example} +Lemma distr_and : A -> B /\ C -> (A -> B) /\ (A -> C). +tauto. +Qed. +\end{coq_example} + +\subsection{Classical reasoning} + +\verb:tauto: always comes back with an answer. Here is an example where it +fails: +\begin{coq_example} +Lemma Peirce : ((A -> B) -> A) -> A. +try tauto. +\end{coq_example} + +Note the use of the \verb:Try: tactical, which does nothing if its tactic +argument fails. + +This may come as a surprise to someone familiar with classical reasoning. +Peirce's lemma is true in Boolean logic, i.e. it evaluates to \verb:true: for +every truth-assignment to \verb:A: and \verb:B:. Indeed the double negation +of Peirce's law may be proved in \Coq~ using \verb:tauto:: +\begin{coq_example} +Abort. +Lemma NNPeirce : ~ ~ (((A -> B) -> A) -> A). +tauto. +Qed. +\end{coq_example} + +In classical logic, the double negation of a proposition is equivalent to this +proposition, but in the constructive logic of \Coq~ this is not so. If you +want to use classical logic in \Coq, you have to import explicitly the +\verb:Classical: module, which will declare the axiom \verb:classic: +of excluded middle, and classical tautologies such as de Morgan's laws. +The \verb:Require: command is used to import a module from \Coq's library: +\begin{coq_example} +Require Import Classical. +Check NNPP. +\end{coq_example} + +and it is now easy (although admittedly not the most direct way) to prove +a classical law such as Peirce's: +\begin{coq_example} +Lemma Peirce : ((A -> B) -> A) -> A. +apply NNPP; tauto. +Qed. +\end{coq_example} + +Here is one more example of propositional reasoning, in the shape of +a Scottish puzzle. 
A private club has the following rules:
+\begin{enumerate}
+\item Every non-Scottish member wears red socks
+\item Every member wears a kilt or doesn't wear red socks
+\item The married members don't go out on Sunday
+\item A member goes out on Sunday if and only if he is Scottish
+\item Every member who wears a kilt is Scottish and married
+\item Every Scottish member wears a kilt
+\end{enumerate}
+Now, we show that these rules are so strict that no one can be accepted.
+\begin{coq_example}
+Section club.
+Variables Scottish RedSocks WearKilt Married GoOutSunday : Prop.
+Hypothesis rule1 : ~ Scottish -> RedSocks.
+Hypothesis rule2 : WearKilt \/ ~ RedSocks.
+Hypothesis rule3 : Married -> ~ GoOutSunday.
+Hypothesis rule4 : GoOutSunday <-> Scottish.
+Hypothesis rule5 : WearKilt -> Scottish /\ Married.
+Hypothesis rule6 : Scottish -> WearKilt.
+Lemma NoMember : False.
+tauto.
+Qed.
+\end{coq_example}
+At that point \verb:NoMember: is a proof of the absurdity depending on
+hypotheses.
+We may end the section; in that case, the variables and hypotheses
+will be discharged, and the type of \verb:NoMember: will be
+generalised.
+
+\begin{coq_example}
+End club.
+Check NoMember.
+\end{coq_example}
+
+\section{Predicate Calculus}
+
+Let us now move into predicate logic, and first of all into first-order
+predicate calculus. The essence of predicate calculus is to try to prove
+theorems in the most abstract possible way, without using the definitions of
+the mathematical notions, but by formal manipulations of uninterpreted
+function and predicate symbols.
+
+\subsection{Sections and signatures}
+
+Usually one works in some domain of discourse, over which range the individual
+variables and function symbols. In \Coq~ we speak in a language with a rich
+variety of types, so we may mix several domains of discourse, in our
+multi-sorted language.
For the moment, we just do a few exercises, over a
+domain of discourse \verb:D: axiomatised as a \verb:Set:, and we consider two
+predicate symbols \verb:P: and \verb:R: over \verb:D:, of arities
+respectively 1 and 2. Such abstract entities may be entered in the context
+as global variables. But we must be careful about the pollution of our
+global environment by such declarations. For instance, we have already
+polluted our \Coq~ session by declaring the variables
+\verb:n:, \verb:Pos_n:, \verb:A:, \verb:B:, and \verb:C:. If we want to revert to the clean state of
+our initial session, we may use the \Coq~ \verb:Reset: command, which returns
+to the state just prior to the given global notion, as we did before to
+remove a section, or we may return to the initial state using~:
+\begin{coq_example}
+Reset Initial.
+\end{coq_example}
+
+We shall now declare a new \verb:Section:, which will allow us to define
+notions local to a well-delimited scope. We start by assuming a domain of
+discourse \verb:D:, and a binary relation \verb:R: over \verb:D::
+\begin{coq_example}
+Section Predicate_calculus.
+Variable D : Set.
+Variable R : D -> D -> Prop.
+\end{coq_example}
+
+As a simple example of predicate calculus reasoning, let us assume
+that relation \verb:R: is symmetric and transitive, and let us show that
+\verb:R: is reflexive in any point \verb:x: which has an \verb:R: successor.
+Since we do not want to make the assumptions about \verb:R: global axioms of
+a theory, but rather local hypotheses to a theorem, we open a specific
+section to this effect.
+\begin{coq_example}
+Section R_sym_trans.
+Hypothesis R_symmetric : forall x y:D, R x y -> R y x.
+Hypothesis R_transitive : forall x y z:D, R x y -> R y z -> R x z.
+\end{coq_example}
+
+Remark the syntax \verb+forall x:D,+ which stands for universal quantification
+$\forall x : D$.
+
+\subsection{Existential quantification}
+
+We now state our lemma, and enter proof mode.
+\begin{coq_example} +Lemma refl_if : forall x:D, (exists y, R x y) -> R x x. +\end{coq_example} + +Remark that the hypotheses which are local to the currently opened sections +are listed as local hypotheses to the current goals. +The rationale is that these hypotheses are going to be discharged, as we +shall see, when we shall close the corresponding sections. + +Note the functional syntax for existential quantification. The existential +quantifier is built from the operator \verb:ex:, which expects a +predicate as argument: +\begin{coq_example} +Check ex. +\end{coq_example} +and the notation \verb+(exists x:D, P x)+ is just concrete syntax for +\verb+(ex D (fun x:D => P x))+. +Existential quantification is handled in \Coq~ in a similar +fashion to the connectives \verb:/\: and \verb:\/: : it is introduced by +the proof combinator \verb:ex_intro:, which is invoked by the specific +tactic \verb:Exists:, and its elimination provides a witness \verb+a:D+ to +\verb:P:, together with an assumption \verb+h:(P a)+ that indeed \verb+a+ +verifies \verb:P:. Let us see how this works on this simple example. +\begin{coq_example} +intros x x_Rlinked. +\end{coq_example} + +Remark that \verb:intros: treats universal quantification in the same way +as the premises of implications. Renaming of bound variables occurs +when it is needed; for instance, had we started with \verb:intro y:, +we would have obtained the goal: +\begin{coq_eval} +Undo. +\end{coq_eval} +\begin{coq_example} +intro y. +\end{coq_example} +\begin{coq_eval} +Undo. +intros x x_Rlinked. +\end{coq_eval} + +Let us now use the existential hypothesis \verb:x_Rlinked: to +exhibit an R-successor y of x. This is done in two steps, first with +\verb:elim:, then with \verb:intros: + +\begin{coq_example} +elim x_Rlinked. +intros y Rxy. +\end{coq_example} + +Now we want to use \verb:R_transitive:. 
The \verb:apply: tactic will know
+how to match \verb:x: with \verb:x:, and \verb:z: with \verb:x:, but needs
+help on how to instantiate \verb:y:, which appears in the hypotheses of
+\verb:R_transitive:, but not in its conclusion. We give the proper hint
+to \verb:apply: in a \verb:with: clause, as follows:
+\begin{coq_example}
+apply R_transitive with y.
+\end{coq_example}
+
+The rest of the proof is routine:
+\begin{coq_example}
+assumption.
+apply R_symmetric; assumption.
+\end{coq_example}
+\begin{coq_example*}
+Qed.
+\end{coq_example*}
+
+Let us now close the current section.
+\begin{coq_example}
+End R_sym_trans.
+\end{coq_example}
+
+Here \Coq's printout is a warning that all local hypotheses have been
+discharged in the statement of \verb:refl_if:, which now becomes a general
+theorem in the first-order language declared in section
+\verb:Predicate_calculus:. In this particular example, the use of section
+\verb:R_sym_trans: has not been really significant, since we could have
+instead stated theorem \verb:refl_if: in its general form, and done
+basically the same proof, obtaining \verb:R_symmetric: and
+\verb:R_transitive: as local hypotheses by initial \verb:intros: rather
+than as global hypotheses in the context. But if we had pursued the
+theory by proving more theorems about relation \verb:R:,
+we would have obtained all general statements at the closing of the section,
+with minimal dependencies on the hypotheses of symmetry and transitivity.
+
+\subsection{Paradoxes of classical predicate calculus}
+
+Let us illustrate this feature by pursuing our \verb:Predicate_calculus:
+section with an enrichment of our language: we declare a unary predicate
+\verb:P: and a constant \verb:d::
+\begin{coq_example}
+Variable P : D -> Prop.
+Variable d : D.
+\end{coq_example}
+
+We shall now prove a well-known fact from first-order logic: a universal
+predicate is non-empty, or in other terms existential quantification
+follows from universal quantification.
+\begin{coq_example}
+Lemma weird : (forall x:D, P x) -> exists a, P a.
+ intro UnivP.
+\end{coq_example}
+
+First of all, notice the pair of parentheses around
+\verb+forall x:D, P x+ in
+the statement of lemma \verb:weird:.
+If we had omitted them, \Coq's parser would have interpreted the
+statement as a truly trivial fact, since we would
+postulate an \verb:x: verifying \verb:(P x):. Here the situation is indeed
+more problematic. If we have some element in \verb:Set: \verb:D:, we may
+apply \verb:UnivP: to it and conclude, otherwise we are stuck. Indeed
+such an element \verb:d: exists, but this is just by virtue of our
+new signature. This points out a subtle difference between standard
+predicate calculus and \Coq. In standard first-order logic,
+the equivalent of lemma \verb:weird: always holds,
+because such a rule is wired in the inference rules for quantifiers, the
+semantic justification being that the interpretation domain is assumed to
+be non-empty. Whereas in \Coq, where types are not assumed to be
+systematically inhabited, lemma \verb:weird: only holds in signatures
+which allow the explicit construction of an element in the domain of
+the predicate.
+
+Let us conclude the proof, in order to show the use of the \verb:exists:
+tactic:
+\begin{coq_example}
+exists d; trivial.
+Qed.
+\end{coq_example}
+
+Another fact which illustrates the sometimes disconcerting rules of
+classical
+predicate calculus is Smullyan's drinkers' paradox: ``In any non-empty
+bar, there is a person such that if she drinks, then everyone drinks''.
+We model the bar by the Set \verb:D:, drinking by predicate \verb:P:.
+We shall need classical reasoning. Instead of loading the \verb:Classical:
+module as we did above, we just state the law of excluded middle as a
+local hypothesis schema at this point:
+\begin{coq_example}
+Hypothesis EM : forall A:Prop, A \/ ~ A.
+Lemma drinker : exists x:D, P x -> forall x:D, P x.
+\end{coq_example}
+The proof goes by cases on whether or not
+there is someone who does not drink. Such reasoning by cases proceeds
+by invoking the excluded middle principle, via \verb:elim: of the
+proper instance of \verb:EM::
+\begin{coq_example}
+elim (EM (exists x, ~ P x)).
+\end{coq_example}
+
+We first look at the first case. Let Tom be the non-drinker:
+\begin{coq_example}
+intro Non_drinker; elim Non_drinker; intros Tom Tom_does_not_drink.
+\end{coq_example}
+
+We conclude in that case by considering Tom, since his drinking leads to
+a contradiction:
+\begin{coq_example}
+exists Tom; intro Tom_drinks.
+\end{coq_example}
+
+There are several ways in which we may eliminate a contradictory case;
+a simple one is to use the \verb:absurd: tactic as follows:
+\begin{coq_example}
+absurd (P Tom); trivial.
+\end{coq_example}
+
+We now proceed with the second case, in which actually any person will do;
+such a John Doe is given by the non-emptiness witness \verb:d::
+\begin{coq_example}
+intro No_nondrinker; exists d; intro d_drinks.
+\end{coq_example}
+
+Now we consider any Dick in the bar, and reason by cases according to his
+drinking or not:
+\begin{coq_example}
+intro Dick; elim (EM (P Dick)); trivial.
+\end{coq_example}
+
+The only non-trivial case is again treated by contradiction:
+\begin{coq_example}
+intro Dick_does_not_drink; absurd (exists x, ~ P x); trivial.
+exists Dick; trivial.
+Qed.
+\end{coq_example}
+
+Now, let us close the main section and look at the complete statements
+we proved:
+\begin{coq_example}
+End Predicate_calculus.
+Check refl_if.
+Check weird.
+Check drinker.
+\end{coq_example}
+
+Remark how the three theorems are completely generic in the most general
+fashion;
+the domain \verb:D: is discharged in all of them, \verb:R: is discharged in
+\verb:refl_if: only, \verb:P: is discharged only in \verb:weird: and
+\verb:drinker:, along with the hypothesis that \verb:D: is inhabited.
+Finally, the excluded middle hypothesis is discharged only in +\verb:drinker:. + +Note that the name \verb:d: has vanished as well from +the statements of \verb:weird: and \verb:drinker:, +since \Coq's pretty-printer replaces +systematically a quantification such as \verb+forall d:D, E+, where \verb:d: +does not occur in \verb:E:, by the functional notation \verb:D->E:. +Similarly the name \verb:EM: does not appear in \verb:drinker:. + +Actually, universal quantification, implication, +as well as function formation, are +all special cases of one general construct of type theory called +{\sl dependent product}. This is the mathematical construction +corresponding to an indexed family of functions. A function +$f\in \Pi x:D\cdot Cx$ maps an element $x$ of its domain $D$ to its +(indexed) codomain $Cx$. Thus a proof of $\forall x:D\cdot Px$ is +a function mapping an element $x$ of $D$ to a proof of proposition $Px$. + + +\subsection{Flexible use of local assumptions} + +Very often during the course of a proof we want to retrieve a local +assumption and reintroduce it explicitly in the goal, for instance +in order to get a more general induction hypothesis. The tactic +\verb:generalize: is what is needed here: + +\begin{coq_example} +Section Predicate_Calculus. +Variables P Q : nat -> Prop. +Variable R : nat -> nat -> Prop. +Lemma PQR : + forall x y:nat, (R x x -> P x -> Q x) -> P x -> R x y -> Q x. +intros. +generalize H0. +\end{coq_example} + +Sometimes it may be convenient to use a lemma, although we do not have +a direct way to appeal to such an already proven fact. The tactic \verb:cut: +permits to use the lemma at this point, keeping the corresponding proof +obligation as a new subgoal: +\begin{coq_example} +cut (R x x); trivial. +\end{coq_example} +We clean the goal by doing an \verb:Abort: command. +\begin{coq_example*} +Abort. 
+\end{coq_example*}
+
+
+\subsection{Equality}
+
+The basic equality provided in \Coq~ is Leibniz equality, noted infix like
+\verb+x=y+, when \verb:x: and \verb:y: are two expressions of the same
+type in \verb:Set:. The replacement of \verb:x: by \verb:y: in any
+term is effected by a variety of tactics, such as \verb:rewrite:
+and \verb:replace:.
+
+Let us give a few examples of equality replacement. Let us assume that
+some arithmetic function \verb:f: is null in zero:
+\begin{coq_example}
+Variable f : nat -> nat.
+Hypothesis foo : f 0 = 0.
+\end{coq_example}
+
+We want to prove the following conditional equality:
+\begin{coq_example*}
+Lemma L1 : forall k:nat, k = 0 -> f k = k.
+\end{coq_example*}
+
+As usual, we first get rid of local assumptions with \verb:intro::
+\begin{coq_example}
+intros k E.
+\end{coq_example}
+
+Let us now use equation \verb:E: as a left-to-right rewriting:
+\begin{coq_example}
+rewrite E.
+\end{coq_example}
+This replaced both occurrences of \verb:k: by \verb:O:.
+
+Now \verb:apply foo: will finish the proof:
+
+\begin{coq_example}
+apply foo.
+Qed.
+\end{coq_example}
+
+When one wants to rewrite an equality in a right to left fashion, one should
+use \verb:rewrite <- E: rather than \verb:rewrite E: or the equivalent
+\verb:rewrite -> E:.
+Let us now illustrate the tactic \verb:replace:.
+\begin{coq_example}
+Hypothesis f10 : f 1 = f 0.
+Lemma L2 : f (f 1) = 0.
+replace (f 1) with 0.
+\end{coq_example}
+What happened here is that the replacement left the first subgoal to be
+proved, but another proof obligation was generated by the \verb:replace:
+tactic, as the second subgoal. The first subgoal is solved immediately
+by applying lemma \verb:foo:; the second one by transitivity and then
+symmetry of equality, for instance with tactics \verb:transitivity: and
+\verb:symmetry::
+\begin{coq_example}
+apply foo.
+transitivity (f 0); symmetry; trivial.
+\end{coq_example}
+In case the equality $t=u$ generated by \verb:replace: $u$ \verb:with:
+$t$ is an assumption
+(possibly modulo symmetry), it will be automatically proved and the
+corresponding goal will not appear. For instance:
+\begin{coq_example}
+Restart.
+replace (f 0) with 0.
+rewrite f10; rewrite foo; trivial.
+Qed.
+\end{coq_example}
+
+\section{Using definitions}
+
+The development of mathematics does not simply proceed by logical
+argumentation from first principles: definitions are used in an essential way.
+A formal development proceeds by a dual process of abstraction, where one
+proves abstract statements in predicate calculus, and of use of definitions,
+by which, on the contrary, one instantiates general statements with particular
+notions in order to use the structure of mathematical values for the proof of
+more specialised properties.
+
+\subsection{Unfolding definitions}
+
+Assume that we want to develop the theory of sets represented as characteristic
+predicates over some universe \verb:U:. For instance:
+%CP Une petite explication pour le codage de element ?
+\begin{coq_example}
+Variable U : Type.
+Definition set := U -> Prop.
+Definition element (x:U) (S:set) := S x.
+Definition subset (A B:set) := forall x:U, element x A -> element x B.
+\end{coq_example}
+
+Now, assume that we have loaded a module of general properties about
+relations over some abstract type \verb:T:, such as transitivity:
+
+\begin{coq_example}
+Definition transitive (T:Type) (R:T -> T -> Prop) :=
+  forall x y z:T, R x y -> R y z -> R x z.
+\end{coq_example}
+
+Now, assume that we want to prove that \verb:subset: is a \verb:transitive:
+relation.
+\begin{coq_example}
+Lemma subset_transitive : transitive set subset.
+\end{coq_example}
+
+In order to make any progress, one needs to use the definition of
+\verb:transitive:. The \verb:unfold: tactic, which replaces all
+occurrences of a defined notion by its definition in the current goal,
+may be used here.
+\begin{coq_example} +unfold transitive. +\end{coq_example} + +Now, we must unfold \verb:subset:: +\begin{coq_example} +unfold subset. +\end{coq_example} +Now, unfolding \verb:element: would be a mistake, because indeed a simple proof +can be found by \verb:auto:, keeping \verb:element: an abstract predicate: +\begin{coq_example} +auto. +\end{coq_example} + +Many variations on \verb:unfold: are provided in \Coq. For instance, +we may selectively unfold one designated occurrence: +\begin{coq_example} +Undo 2. +unfold subset at 2. +\end{coq_example} + +One may also unfold a definition in a given local hypothesis, using the +\verb:in: notation: +\begin{coq_example} +intros. +unfold subset in H. +\end{coq_example} + +Finally, the tactic \verb:red: does only unfolding of the head occurrence +of the current goal: +\begin{coq_example} +red. +auto. +Qed. +\end{coq_example} + + +\subsection{Principle of proof irrelevance} + +Even though in principle the proof term associated with a verified lemma +corresponds to a defined value of the corresponding specification, such +definitions cannot be unfolded in \Coq: a lemma is considered an {\sl opaque} +definition. This conforms to the mathematical tradition of {\sl proof +irrelevance}: the proof of a logical proposition does not matter, and the +mathematical justification of a logical development relies only on +{\sl provability} of the lemmas used in the formal proof. + +Conversely, ordinary mathematical definitions can be unfolded at will, they +are {\sl transparent}. +%It is possible to enforce the reverse convention by +%declaring a definition as {\sl opaque} or a lemma as {\sl transparent}. + +\chapter{Induction} + +\section{Data Types as Inductively Defined Mathematical Collections} + +All the notions which were studied until now pertain to traditional +mathematical logic. 
Specifications of objects were abstract properties
+used in reasoning more or less constructively; we are now entering
+the realm of inductive types, which specify the existence of concrete
+mathematical constructions.
+
+\subsection{Booleans}
+
+Let us start with the collection of booleans, as they are specified
+in \Coq's \verb:Prelude: module:
+\begin{coq_example}
+Inductive bool : Set := true | false.
+\end{coq_example}
+
+Such a declaration defines several objects at once. First, a new
+\verb:Set: is declared, with name \verb:bool:. Then the {\sl constructors}
+of this \verb:Set: are declared, called \verb:true: and \verb:false:.
+Those are analogous to introduction rules of the new Set \verb:bool:.
+Finally, a specific elimination rule for \verb:bool: is now available, which
+permits reasoning by cases on \verb:bool: values. Three instances are
+indeed defined as new combinators in the global context: \verb:bool_ind:,
+a proof combinator corresponding to reasoning by cases,
+\verb:bool_rec:, an if-then-else programming construct,
+and \verb:bool_rect:, a similar combinator at the level of types.
+Indeed:
+\begin{coq_example}
+Check bool_ind.
+Check bool_rec.
+Check bool_rect.
+\end{coq_example}
+
+Let us for instance prove that every Boolean is true or false.
+\begin{coq_example}
+Lemma duality : forall b:bool, b = true \/ b = false.
+intro b.
+\end{coq_example}
+
+We use the knowledge that \verb:b: is a \verb:bool: by calling tactic
+\verb:elim:, which in this case will appeal to combinator \verb:bool_ind:
+in order to split the proof according to the two cases:
+\begin{coq_example}
+elim b.
+\end{coq_example}
+
+It is easy to conclude in each case:
+\begin{coq_example}
+left; trivial.
+right; trivial.
+\end{coq_example}
+
+Indeed, the whole proof can be done with the combination of the
+\verb:simple induction: tactic, which combines \verb:intro: and \verb:elim:,
+with good old \verb:auto::
+\begin{coq_example}
+Restart.
+simple induction b; auto.
+Qed. 
+\end{coq_example}
+
+\subsection{Natural numbers}
+
+Similarly to Booleans, natural numbers are defined in the \verb:Prelude:
+module with constructors \verb:S: and \verb:O::
+\begin{coq_example}
+Inductive nat : Set :=
+  | O : nat
+  | S : nat -> nat.
+\end{coq_example}
+
+The elimination principles which are automatically generated are Peano's
+induction principle, and a recursion operator:
+\begin{coq_example}
+Check nat_ind.
+Check nat_rec.
+\end{coq_example}
+
+Let us start by showing how to program the standard primitive recursion
+operator \verb:prim_rec: from the more general \verb:nat_rec::
+\begin{coq_example}
+Definition prim_rec := nat_rec (fun i:nat => nat).
+\end{coq_example}
+
+That is, instead of computing for natural \verb:i: an element of the indexed
+\verb:Set: \verb:(P i):, \verb:prim_rec: computes uniformly an element of
+\verb:nat:. Let us check the type of \verb:prim_rec::
+\begin{coq_example}
+Check prim_rec.
+\end{coq_example}
+
+Oops! Instead of the expected type \verb+nat->(nat->nat->nat)->nat->nat+ we
+get an apparently more complicated expression. Indeed the type of
+\verb:prim_rec: is equivalent by rule $\beta$ to its expected type; this may
+be checked in \Coq~ by command \verb:Eval cbv beta:, which $\beta$-reduces
+an expression to its {\sl normal form}:
+\begin{coq_example}
+Eval cbv beta in
+  ((fun _:nat => nat) O ->
+   (forall y:nat, (fun _:nat => nat) y -> (fun _:nat => nat) (S y)) ->
+   forall n:nat, (fun _:nat => nat) n).
+\end{coq_example}
+
+Let us now show how to program addition with primitive recursion:
+\begin{coq_example}
+Definition addition (n m:nat) := prim_rec m (fun p rec:nat => S rec) n.
+\end{coq_example}
+
+That is, we specify that \verb+(addition n m)+ computes by cases on \verb:n:
+according to its main constructor; when \verb:n = O:, we get \verb:m:;
+ when \verb:n = S p:, we get \verb:(S rec):, where \verb:rec: is the result
+of the recursive computation \verb+(addition p m)+. 
Let us verify it by
+asking \Coq~to compute for us say $2+3$:
+\begin{coq_example}
+Eval compute in (addition (S (S O)) (S (S (S O)))).
+\end{coq_example}
+
+Actually, we do not have to do all this explicitly. {\Coq} provides a
+special syntax {\tt Fixpoint/match} for generic primitive recursion,
+and we could thus have defined directly addition as:
+
+\begin{coq_example}
+Fixpoint plus (n m:nat) {struct n} : nat :=
+  match n with
+  | O => m
+  | S p => S (plus p m)
+  end.
+\end{coq_example}
+
+For the rest of the session, we shall clean up what we did so far with
+types \verb:bool: and \verb:nat:, in order to use the initial definitions
+given in \Coq's \verb:Prelude: module, and not to get confusing error
+messages due to our redefinitions. We thus revert to the state before
+our definition of \verb:bool: with the \verb:Reset: command:
+\begin{coq_example}
+Reset bool.
+\end{coq_example}
+
+
+\subsection{Simple proofs by induction}
+%CP Pourquoi ne pas commencer par des preuves d'egalite entre termes
+% convertibles.
+\begin{coq_eval}
+Reset Initial.
+\end{coq_eval}
+
+Let us now show how to do proofs by structural induction. We start with easy
+properties of the \verb:plus: function we just defined. Let us first
+show that $n=n+0$.
+\begin{coq_example}
+Lemma plus_n_O : forall n:nat, n = n + 0.
+intro n; elim n.
+\end{coq_example}
+
+What happened was that \verb:elim n:, in order to construct a \verb:Prop:
+(the initial goal) from a \verb:nat: (i.e. \verb:n:), appealed to the
+corresponding induction principle \verb:nat_ind: which we saw was indeed
+exactly Peano's induction scheme. Pattern-matching instantiated the
+corresponding predicate \verb:P: to \verb+fun n:nat => n = n + 0+, and we get
+as subgoals the corresponding instantiations of the base case \verb:(P O): ,
+and of the inductive step \verb+forall y:nat, P y -> P (S y)+. 
+In each case we get an instance of function \verb:plus: in which its first
+argument starts with a constructor, and is thus amenable to simplification
+by primitive recursion. The \Coq~tactic \verb:simpl: can be used for
+this purpose:
+\begin{coq_example}
+simpl.
+auto.
+\end{coq_example}
+
+We proceed in the same way for the inductive step:
+\begin{coq_example}
+simpl; auto.
+Qed.
+\end{coq_example}
+
+Here \verb:auto: succeeded, because it used as a hint lemma \verb:eq_S:,
+which says that successor preserves equality:
+\begin{coq_example}
+Check eq_S.
+\end{coq_example}
+
+Actually, let us see how to declare our lemma \verb:plus_n_O: as a hint
+to be used by \verb:auto::
+\begin{coq_example}
+Hint Resolve plus_n_O .
+\end{coq_example}
+
+We now proceed to the similar property concerning the other constructor
+\verb:S::
+\begin{coq_example}
+Lemma plus_n_S : forall n m:nat, S (n + m) = n + S m.
+\end{coq_example}
+
+We now go faster, remembering that tactic \verb:simple induction: does the
+necessary \verb:intros: before applying \verb:elim:. Factoring simplification
+and automation in both cases thanks to tactic composition, we prove this
+lemma in one line:
+\begin{coq_example}
+simple induction n; simpl; auto.
+Qed.
+Hint Resolve plus_n_S .
+\end{coq_example}
+
+Let us end this exercise with the commutativity of \verb:plus::
+
+\begin{coq_example}
+Lemma plus_com : forall n m:nat, n + m = m + n.
+\end{coq_example}
+
+Here we have a choice of doing an induction on \verb:n: or on \verb:m:, the
+situation being symmetric. For instance:
+\begin{coq_example}
+simple induction m; simpl; auto.
+\end{coq_example}
+
+Here \verb:auto: succeeded on the base case, thanks to our hint
+\verb:plus_n_O:, but the induction step requires rewriting, which
+\verb:auto: does not handle:
+
+\begin{coq_example}
+intros m' E; rewrite <- E; auto.
+Qed.
+\end{coq_example}
+
+\subsection{Discriminate}
+
+It is also possible to define new propositions by primitive recursion. 
+Let us for instance define the predicate which discriminates between +the constructors \verb:O: and \verb:S:: it computes to \verb:False: +when its argument is \verb:O:, and to \verb:True: when its argument is +of the form \verb:(S n):: +\begin{coq_example} +Definition Is_S (n:nat) := match n with + | O => False + | S p => True + end. +\end{coq_example} + +Now we may use the computational power of \verb:Is_S: in order to prove +trivially that \verb:(Is_S (S n)):: +\begin{coq_example} +Lemma S_Is_S : forall n:nat, Is_S (S n). +simpl; trivial. +Qed. +\end{coq_example} + +But we may also use it to transform a \verb:False: goal into +\verb:(Is_S O):. Let us show a particularly important use of this feature; +we want to prove that \verb:O: and \verb:S: construct different values, one +of Peano's axioms: +\begin{coq_example} +Lemma no_confusion : forall n:nat, 0 <> S n. +\end{coq_example} + +First of all, we replace negation by its definition, by reducing the +goal with tactic \verb:red:; then we get contradiction by successive +\verb:intros:: +\begin{coq_example} +red; intros n H. +\end{coq_example} + +Now we use our trick: +\begin{coq_example} +change (Is_S 0). +\end{coq_example} + +Now we use equality in order to get a subgoal which computes out to +\verb:True:, which finishes the proof: +\begin{coq_example} +rewrite H; trivial. +simpl; trivial. +\end{coq_example} + +Actually, a specific tactic \verb:discriminate: is provided +to produce mechanically such proofs, without the need for the user to define +explicitly the relevant discrimination predicates: + +\begin{coq_example} +Restart. +intro n; discriminate. +Qed. +\end{coq_example} + + +\section{Logic programming} + +In the same way as we defined standard data-types above, we +may define inductive families, and for instance inductive predicates. 
+Here is the definition of predicate $\le$ over type \verb:nat:, as
+given in \Coq's \verb:Prelude: module:
+\begin{coq_example*}
+Inductive le (n:nat) : nat -> Prop :=
+  | le_n : le n n
+  | le_S : forall m:nat, le n m -> le n (S m).
+\end{coq_example*}
+
+This definition introduces a new predicate \verb+le:nat->nat->Prop+,
+and the two constructors \verb:le_n: and \verb:le_S:, which are the
+defining clauses of \verb:le:. That is, we get not only the ``axioms''
+\verb:le_n: and \verb:le_S:, but also the converse property, that
+\verb:(le n m): holds if and only if this statement can be obtained as a
+consequence of these defining clauses; that is, \verb:le: is the
+minimal predicate verifying clauses \verb:le_n: and \verb:le_S:. This is
+ensured, as in the case of inductive data types, by an elimination principle,
+which here amounts to an induction principle \verb:le_ind:, stating this
+minimality property:
+\begin{coq_example}
+Check le.
+Check le_ind.
+\end{coq_example}
+
+Let us show how proofs may be conducted with this principle.
+First we show that $n\le m \Rightarrow n+1\le m+1$:
+\begin{coq_example}
+Lemma le_n_S : forall n m:nat, le n m -> le (S n) (S m).
+intros n m n_le_m.
+elim n_le_m.
+\end{coq_example}
+
+What happens here is similar to the behaviour of \verb:elim: on natural
+numbers: it appeals to the relevant induction principle, here \verb:le_ind:,
+which generates the two subgoals, which may then be solved easily
+%as if ``backchaining'' the current goal
+with the help of the defining clauses of \verb:le:.
+\begin{coq_example}
+apply le_n; trivial.
+intros; apply le_S; trivial.
+\end{coq_example}
+
+Now we know that it is a good idea to give the defining clauses as hints,
+so that the proof may proceed with a simple combination of
+\verb:induction: and \verb:auto:.
+\begin{coq_example}
+Restart.
+Hint Resolve le_n le_S .
+\end{coq_example}
+
+We have a slight problem however. 
We want to say ``Do an induction on +hypothesis \verb:(le n m):'', but we have no explicit name for it. What we +do in this case is to say ``Do an induction on the first unnamed hypothesis'', +as follows. +\begin{coq_example} +simple induction 1; auto. +Qed. +\end{coq_example} + +Here is a more tricky problem. Assume we want to show that +$n\le 0 \Rightarrow n=0$. This reasoning ought to follow simply from the +fact that only the first defining clause of \verb:le: applies. +\begin{coq_example} +Lemma tricky : forall n:nat, le n 0 -> n = 0. +\end{coq_example} + +However, here trying something like \verb:induction 1: would lead +nowhere (try it and see what happens). +An induction on \verb:n: would not be convenient either. +What we must do here is analyse the definition of \verb"le" in order +to match hypothesis \verb:(le n O): with the defining clauses, to find +that only \verb:le_n: applies, whence the result. +This analysis may be performed by the ``inversion'' tactic +\verb:inversion_clear: as follows: +\begin{coq_example} +intros n H; inversion_clear H. +trivial. +Qed. +\end{coq_example} + +\chapter{Modules} + +\section{Opening library modules} + +When you start \Coq~ without further requirements in the command line, +you get a bare system with few libraries loaded. As we saw, a standard +prelude module provides the standard logic connectives, and a few +arithmetic notions. If you want to load and open other modules from +the library, you have to use the \verb"Require" command, as we saw for +classical logic above. For instance, if you want more arithmetic +constructions, you should request: +\begin{coq_example*} +Require Import Arith. +\end{coq_example*} + +Such a command looks for a (compiled) module file \verb:Arith.vo: in +the libraries registered by \Coq. Libraries inherit the structure of +the file system of the operating system and are registered with the +command \verb:Add LoadPath:. Physical directories are mapped to +logical directories. 
Especially the standard library of \Coq~ is
+pre-registered as a library of name \verb=Coq=. Modules have absolute
+unique names denoting their place in \Coq~ libraries. An absolute
+name is a sequence of single identifiers separated by dots. E.g. the
+module \verb=Arith= has full name \verb=Coq.Arith.Arith= and because
+it resides in the eponymous subdirectory \verb=Arith= of the standard
+library, it can be as well required by the command
+
+\begin{coq_example*}
+Require Import Coq.Arith.Arith.
+\end{coq_example*}
+
+This may be useful to avoid ambiguities if somewhere, in another branch
+of the libraries known by Coq, another module is also called
+\verb=Arith=. Notice that by default, when a library is registered,
+all its contents, and all the contents of its subdirectories recursively are
+visible and accessible by a short (relative) name as \verb=Arith=.
+Notice also that modules or definitions not explicitly registered in
+a library are put in a default library called \verb=Top=.
+
+The loading of a compiled file is quick, because the corresponding
+development is not type-checked again.
+
+\section{Creating your own modules}
+
+You may create your own modules, by writing \Coq~ commands in a file,
+say \verb:my_module.v:. Such a module may be simply loaded in the current
+context, with command \verb:Load my_module:. It may also be compiled,
+%using the command \verb:Compile Module my_module: directly at the
+%\Coq~ toplevel, or else
+in ``batch'' mode, using the UNIX command
+\verb:coqc:. Compiling the module \verb:my_module.v: creates a
+file \verb:my_module.vo:{} that can be reloaded with command
+\verb:Require Import my_module:.
+
+If a required module depends on other modules then the latter are
+automatically required beforehand. However their contents are not
+automatically visible. 
If you want a module \verb=M= required in a
+module \verb=N= to be automatically visible when \verb=N= is required,
+you should use \verb:Require Export M: in your module \verb:N:.
+
+\section{Managing the context}
+
+It is often difficult to remember the names of all lemmas and
+definitions available in the current context, especially if large
+libraries have been loaded. A convenient \verb:SearchAbout: command
+is available to look up all known facts
+concerning a given predicate. For instance, if you want to know all the
+known lemmas about the less or equal relation, just ask:
+\begin{coq_example}
+SearchAbout le.
+\end{coq_example}
+Another command \verb:Search: displays only lemmas where the searched
+predicate appears at the head position in the conclusion.
+\begin{coq_example}
+Search le.
+\end{coq_example}
+
+A new and more convenient search tool is \textsf{SearchPattern}
+developed by Yves Bertot. It allows one to find the theorems with a
+conclusion matching a given pattern, where \verb:\_: can be used in
+place of an arbitrary term. We remark in this example that \Coq{}
+provides usual infix notations for arithmetic operators.
+
+\begin{coq_example}
+SearchPattern (_ + _ = _).
+\end{coq_example}
+
+% The argument to give is a type and it searches in the current context all
+% constants having the same type modulo certain notion of
+% \textit{isomorphism}. For example~:
+
+% \begin{coq_example}
+% Require Arith.
+% SearchIsos nat -> nat -> Prop.
+% SearchIsos (x,y,z:nat)(le x y) -> (le y z) -> (le x z).
+% \end{coq_example}
+
+\section{Now you are on your own}
+
+This tutorial is necessarily incomplete. If you wish to pursue serious
+proving in \Coq, you should now get your hands on \Coq's Reference Manual,
+which contains a complete description of all the tactics we saw,
+plus many more.
+You also should look in the library of developed theories which is distributed
+with \Coq, in order to acquaint yourself with various proof techniques. 
+ + +\end{document} + +% $Id: Tutorial.tex 8607 2006-02-23 14:21:14Z herbelin $ -- cgit v1.2.3