-rw-r--r--  CONTRIBUTING.md                                     |  10
-rw-r--r--  Makefile.doc                                        |   3
-rw-r--r--  dev/doc/MERGING.md                                  |  50
-rwxr-xr-x  dev/tools/merge-pr.sh                               |  18
-rw-r--r--  doc/refman/AsyncProofs.tex                          | 221
-rw-r--r--  doc/refman/Reference-Manual.tex                     |   1
-rw-r--r--  doc/sphinx/addendum/parallel-proof-processing.rst   | 229
-rw-r--r--  doc/sphinx/index.rst                                |   1
-rw-r--r--  interp/constrextern.ml                              |  12
-rw-r--r--  intf/vernacexpr.ml                                  |  15
-rw-r--r--  parsing/g_vernac.ml4                                |  15
-rw-r--r--  plugins/funind/glob_term_to_relation.ml             |   4
-rw-r--r--  printing/ppvernac.ml                                |   5
-rw-r--r--  stm/stm.ml                                          |  17
-rw-r--r--  stm/vernac_classifier.ml                            |  10
-rw-r--r--  test-suite/output/Projections.out                   |   2
-rw-r--r--  test-suite/output/Projections.v                     |  11
-rw-r--r--  vernac/vernacentries.ml                             |  21
18 files changed, 344 insertions, 301 deletions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 213b87735..1a3c99369 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -38,13 +38,11 @@ Whitespace discipline (do not indent using tabs, no trailing spaces, text files
Here are a few tags Coq developers may add to your PR and what they mean. In general feedback and requests for you as the pull request author will be in the comments and tags are only used to organize pull requests.
-- [needs: rebase](https://github.com/coq/coq/pulls?utf8=%E2%9C%93&q=is%3Aopen%20is%3Apr%20label%3A%22needs%3A%20rebase%22%20) indicates the PR should be rebased on top of the latest `master` branch. See the [GitHub documentation](https://help.github.com/articles/about-git-rebase/) for a brief introduction to using `git rebase`.
+- [needs: rebase](https://github.com/coq/coq/pulls?utf8=%E2%9C%93&q=is%3Aopen%20is%3Apr%20label%3A%22needs%3A%20rebase%22) indicates the PR should be rebased on top of the latest `master` branch. See the [GitHub documentation](https://help.github.com/articles/about-git-rebase/) for a brief introduction to using `git rebase`.
- [needs: fixing](https://github.com/coq/coq/pulls?q=is%3Aopen+is%3Apr+label%3A%22needs%3A+fixing%22) indicates the PR needs a fix, as discussed in the comments.
-- [needs: testing](https://github.com/coq/coq/pulls?q=is%3Aopen+is%3Apr+label%3A%22needs%3A+testing%22) indicates the PR needs testing. This is often used when testing beyond what the test suite can handle is required. For example, performance benchmarking is currently performed with a different infrastructure. Unless some followup is specifically requested you aren't expected to do this additional testing.
+- [needs: benchmarking](https://github.com/coq/coq/pulls?q=is%3Aopen+is%3Apr+label%3A%22needs%3A+benchmarking%22) and [needs: testing](https://github.com/coq/coq/pulls?q=is%3Aopen+is%3Apr+label%3A%22needs%3A+testing%22) indicate the PR needs testing beyond what the test suite can handle. For example, performance benchmarking is currently performed with a different infrastructure. Unless some follow-up is specifically requested, you aren't expected to do this additional testing.
-The release manager uses the following filter to know which PRs seem ready for merge. If you are waiting for a PR to be merged, make sure it appears in this list:
-
-- [Pull requests ready for merge](https://github.com/coq/coq/pulls?utf8=%E2%9C%93&q=is%3Apr%20is%3Aopen%20-label%3A%22needs%3A%20discussion%22%20-label%3A%22needs%3A%20testing%22%20-label%3A%22needs%3A%20fixing%22%20-label%3A%22needs%3A%20progress%22%20-label%3A%22needs%3A%20rebase%22%20-label%3A%22needs%3A%20review%22%20-label%3A%22needs%3A%20help%22%20-label%3A%22needs%3A%20independent%20fix%22%20-label%3A%22needs%3A%20feedback%22%20-label%3A%22help%20wanted%22%20-review%3Achanges_requested%20-status%3Apending%20base%3Amaster%20sort%3Aupdated-asc%20-label%3A%22needs%3A%20squashing%22%20)
+To learn more about the merging process, you can read the [merging documentation for Coq maintainers](/dev/doc/MERGING.md).
## Documentation
@@ -52,7 +50,7 @@ Currently the process for contributing to the documentation is the same as for c
Our issue tracker includes a flag to mark bugs related to documentation. You can view a list of documentation-related bugs using a [GitHub issue search](https://github.com/coq/coq/issues?q=is%3Aopen+is%3Aissue+label%3A%22kind%3A+documentation%22). Many of these bugs can be fixed by contributing writing, without knowledge of Coq's OCaml source code.
-The sources for the [Coq reference manual](https://coq.inria.fr/distrib/current/refman/) are at [`doc/refman`](/doc/refman). These are written in LaTeX and compiled to HTML with [HeVeA](http://hevea.inria.fr/).
+The sources for the [Coq reference manual](https://coq.inria.fr/distrib/current/refman/) are at [`doc/sphinx`](/doc/sphinx). These are written in reStructuredText and compiled to HTML and PDF with [Sphinx](http://www.sphinx-doc.org/).
You may also contribute to the informal documentation available in [Cocorico](https://github.com/coq/coq/wiki) (the Coq wiki), and the [Coq FAQ](https://github.com/coq/coq/wiki/The-Coq-FAQ). Both of these are editable by anyone with a GitHub account.
diff --git a/Makefile.doc b/Makefile.doc
index ecc68979e..0b45b9cec 100644
--- a/Makefile.doc
+++ b/Makefile.doc
@@ -65,8 +65,7 @@ REFMANCOQTEXFILES:=$(addprefix doc/refman/, \
REFMANTEXFILES:=$(addprefix doc/refman/, \
headers.sty Reference-Manual.tex \
- RefMan-uti.tex \
- AsyncProofs.tex) \
+ RefMan-uti.tex) \
$(REFMANCOQTEXFILES) \
REFMANEPSFILES:=doc/refman/coqide.eps doc/refman/coqide-queries.eps
diff --git a/dev/doc/MERGING.md b/dev/doc/MERGING.md
index 71fc39608..3a2df6a81 100644
--- a/dev/doc/MERGING.md
+++ b/dev/doc/MERGING.md
@@ -1,6 +1,7 @@
# Merging changes in Coq
-This document describes how patches (submitted as Pull Requests) should be
+This document describes how patches (submitted as pull requests
+on the `master` branch) should be
merged into the main repository (https://github.com/coq/coq).
## Code owners
@@ -65,14 +66,57 @@ In some rare cases (e.g. the conflicts are in the CHANGES file), it is ok to fix
the conflicts in the merge commit (following the same steps as below), and push
to `master` directly. Don't use the GitHub interface to fix these conflicts.
-The command to be used is:
+To merge the PR, proceed in the following way:
```
+$ git checkout master
+$ git pull
$ dev/tools/merge-pr XXXX
+$ git push upstream
```
-where `XXXX` is the number of the PR to be merged. This operation should be followed by a push.
+where `XXXX` is the number of the PR to be merged and `upstream` is the name
+of your remote pointing to `git@github.com:coq/coq.git`.
+Note that you are only supposed to merge PRs into `master`. PRs should rarely
+target the stable branch, but when that is the case, they are the responsibility
+of the release manager.
+
+This script conducts various checks before proceeding to merge. Don't bypass them
+without a good reason; if you do, write a comment in the PR thread to
+explain the reason.
Maintainers MUST NOT merge their own patches.
DON'T USE the GitHub interface for merging, since it prevents the automated
backport script from operating properly, generates bad commit messages, and
creates a messy history when there are conflicts.
+
+### What to do if the PR has overlays
+
+If the PR breaks compatibility with some developments in CI, then the author must
+have prepared overlays for these developments (see [`dev/ci/README.md`](/dev/ci/README.md))
+and the PR must absolutely update the `CHANGES` file.
+
+There are two cases to consider:
+
+- If the patch is backward compatible (best scenario), then you should get
+ upstream maintainers to integrate it before merging the PR.
+- If the patch is not backward compatible (which is often the case when
+ patching plugins after an update to the Coq API), then you can proceed to
+ merge the PR and then notify upstream they can merge the patch. This is a
+ less preferable scenario because it is probably going to create
+ spurious CI failures for unrelated PRs.
+
+### Merge script dependencies
+
+The merge script passes option `-S` to `git merge` to ensure merge commits
+are signed. Consequently, it depends on the GnuPG command-line utility being
+installed and a GPG key being available. Here is a short tutorial on
+creating your own GPG key:
+<https://ekaia.org/blog/2009/05/10/creating-new-gpgkey/>
+
+The script depends on a few other utilities. If you are a Nix user, the
+simplest way of getting them is to run `nix-shell` first.
+
+**Note for Homebrew (macOS) users:** it has been reported that GnuPG does not work
+out of the box. Explicitly installing "pinentry-mac" seems to be important for
+passphrase entry to work correctly (see also this
+[Stack Overflow Q-and-A](https://stackoverflow.com/questions/39494631/gpg-failed-to-sign-the-data-fatal-failed-to-write-commit-object-git-2-10-0)).
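
For readers who have never set up commit signing, here is a minimal sketch of the steps the merge script assumes have already been done. The example key ID is a placeholder, and the exact `gpg` prompts vary between versions; see the tutorial linked above for the full details.

```
# Generate a key pair (interactive; use the email address associated with
# your GitHub account when prompted).
$ gpg --full-generate-key

# List your secret keys to find the long ID of the new key.
$ gpg --list-secret-keys --keyid-format long
# sec   rsa4096/ABCD1234EF567890 2018-05-01 [SC]
#       ^ ABCD1234EF567890 is the ID to use below (placeholder here)

# Tell git to sign with that key, so that `git merge -S` (as run by the
# merge script) can find it.
$ git config --global user.signingkey ABCD1234EF567890
```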
diff --git a/dev/tools/merge-pr.sh b/dev/tools/merge-pr.sh
index 2f6f1af54..ecfdfab94 100755
--- a/dev/tools/merge-pr.sh
+++ b/dev/tools/merge-pr.sh
@@ -61,10 +61,12 @@ fi
# Fetching PR metadata
-TITLE=$(curl -s "$API/pulls/$PR" | jq -r '.title')
+PRDATA=$(curl -s "$API/pulls/$PR")
+
+TITLE=$(echo "$PRDATA" | jq -r '.title')
info "title for PR $PR is ${BLUE}$TITLE"
-BASE_BRANCH=$(curl -s "$API/pulls/$PR" | jq -r '.base.label')
+BASE_BRANCH=$(echo "$PRDATA" | jq -r '.base.label')
info "PR $PR targets branch ${BLUE}$BASE_BRANCH"
CURRENT_LOCAL_BRANCH=$(git rev-parse --abbrev-ref HEAD)
@@ -107,17 +109,17 @@ fi;
# Sanity check: CI failed
-STATUS=$(curl -s "$API/commits/$COMMIT/status" | jq -r '.state')
+STATUS=$(curl -s "$API/commits/$COMMIT/status")
-if [ "$STATUS" != "success" ]; then
- error "CI unsuccessful on ${BLUE}$(curl -s "$API/commits/$COMMIT/status" |
+if [ "$(echo "$STATUS" | jq -r '.state')" != "success" ]; then
+ error "CI unsuccessful on ${BLUE}$(echo "$STATUS" |
jq -r -c '.statuses|map(select(.state != "success"))|map(.context)')"
ask_confirmation
fi;
# Sanity check: has labels named "needs:"
-NEEDS_LABELS=$(curl -s "$API/pulls/$PR" | jq -rc '.labels | map(select(.name | match("needs:"))) | map(.name)')
+NEEDS_LABELS=$(echo "$PRDATA" | jq -rc '.labels | map(select(.name | match("needs:"))) | map(.name)')
if [ "$NEEDS_LABELS" != "[]" ]; then
error "needs:something labels still present: ${BLUE}$NEEDS_LABELS"
ask_confirmation
@@ -125,7 +127,7 @@ fi
# Sanity check: has milestone
-MILESTONE=$(curl -s "$API/pulls/$PR" | jq -rc '.milestone.title')
+MILESTONE=$(echo "$PRDATA" | jq -rc '.milestone.title')
if [ "$MILESTONE" = "null" ]; then
error "no milestone set, please set one"
ask_confirmation
@@ -133,7 +135,7 @@ fi
# Sanity check: has kind
-KIND=$(curl -s "$API/pulls/$PR" | jq -rc '.labels | map(select(.name | match("kind:"))) | map(.name)')
+KIND=$(echo "$PRDATA" | jq -rc '.labels | map(select(.name | match("kind:"))) | map(.name)')
if [ "$KIND" = "[]" ]; then
error "no kind:something label set, please set one"
ask_confirmation
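
The refactoring above fetches the PR metadata once into `PRDATA` and then runs every `jq` query against that cached JSON instead of calling the GitHub API repeatedly. Below is a self-contained sketch of the same pattern; the repository and PR number are placeholders, and no error handling is included.

```
#!/usr/bin/env bash
set -euo pipefail

API=https://api.github.com/repos/coq/coq
PR=1234   # placeholder PR number

# One network round-trip...
PRDATA=$(curl -s "$API/pulls/$PR")

# ...then several questions answered from the cached JSON.
TITLE=$(echo "$PRDATA" | jq -r '.title')
MILESTONE=$(echo "$PRDATA" | jq -rc '.milestone.title')
NEEDS_LABELS=$(echo "$PRDATA" | jq -rc '.labels | map(select(.name | match("needs:"))) | map(.name)')

echo "title:     $TITLE"
echo "milestone: $MILESTONE"
echo "needs:     $NEEDS_LABELS"
```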
diff --git a/doc/refman/AsyncProofs.tex b/doc/refman/AsyncProofs.tex
deleted file mode 100644
index 8f9d876cb..000000000
--- a/doc/refman/AsyncProofs.tex
+++ /dev/null
@@ -1,221 +0,0 @@
-\achapter{Asynchronous and Parallel Proof Processing\label{Asyncprocessing}}
-%HEVEA\cutname{async-proofs.html}
-\aauthor{Enrico Tassi}
-
-\label{pralitp}
-\index{Asynchronous and Parallel Proof Processing!presentation}
-
-This chapter explains how proofs can be asynchronously processed by Coq.
-This feature improves the reactivity of the system when used in interactive
-mode via CoqIDE. In addition, it allows Coq to take advantage of
-parallel hardware when used as a batch compiler by decoupling the checking
-of statements and definitions from the construction and checking of proofs
-objects.
-
-This feature is designed to help dealing with huge libraries of theorems
-characterized by long proofs. In the current state, it may not be beneficial
-on small sets of short files.
-
-This feature has some technical limitations that may make it unsuitable for
-some use cases.
-
-For example, in interactive mode, some errors coming from the kernel of Coq
-are signaled late. The type of errors belonging to this category
-are universe inconsistencies.
-
-At the time of writing, only opaque proofs (ending with \texttt{Qed} or \texttt{Admitted}) can be processed asynchronously.
-
-Finally, asynchronous processing is disabled when running CoqIDE in Windows. The
-current implementation of the feature is not stable on Windows. It can be
-enabled, as described below at \ref{interactivecaveats}, though doing so is not
-recommended.
-
-\section{Proof annotations}
-
-To process a proof asynchronously Coq needs to know the precise statement
-of the theorem without looking at the proof. This requires some annotations
-if the theorem is proved inside a \texttt{Section} (see Section~\ref{Section}).
-
-When a section ends, Coq looks at the proof object to decide which
-section variables are actually used and hence have to be quantified in the
-statement of the theorem. To avoid making the construction of proofs
-mandatory when ending a section, one can start each proof with the
-\texttt{Proof using} command (Section~\ref{ProofUsing}) that declares which
-section variables the theorem uses.
-
-The presence of \texttt{Proof using} is needed to process proofs
-asynchronously in interactive mode.
-
-It is not strictly mandatory in batch mode if it is not the first time
-the file is compiled and if the file itself did not change. When the
-proof does not begin with \texttt{Proof using}, the system records in an
-auxiliary file, produced along with the \texttt{.vo} file, the list of
-section variables used.
-
-\subsubsection{Automatic suggestion of proof annotations}
-
-The command \texttt{Set Suggest Proof Using} makes Coq suggest, when a
-\texttt{Qed} command is processed, a correct proof annotation. It is up
-to the user to modify the proof script accordingly.
-
-\section{Proof blocks and error resilience}
-
-Coq 8.6 introduces a mechanism for error resiliency: in interactive mode Coq
-is able to completely check a document containing errors instead of bailing
-out at the first failure.
-
-Two kind of errors are supported: errors occurring in vernacular commands and
-errors occurring in proofs.
-
-To properly recover from a failing tactic, Coq needs to recognize the structure of
-the proof in order to confine the error to a sub proof. Proof block detection
-is performed by looking at the syntax of the proof script (i.e. also looking at indentation).
-Coq comes with four kind of proof blocks, and an ML API to add new ones.
-
-\begin{description}
-\item[curly] blocks are delimited by \texttt{\{} and \texttt{\}}, see \ref{Proof-handling}
-\item[par] blocks are atomic, i.e. just one tactic introduced by the \texttt{par:} goal selector
-\item[indent] blocks end with a tactic indented less than the previous one
-\item[bullet] blocks are delimited by two equal bullet signs at the same indentation level
-\end{description}
-
-\subsection{Caveats}
-
-When a vernacular command fails the subsequent error messages may be bogus, i.e. caused by
-the first error. Error resiliency for vernacular commands can be switched off passing
-\texttt{-async-proofs-command-error-resilience off} to CoqIDE.
-
-An incorrect proof block detection can result into an incorrect error recovery and
-hence in bogus errors. Proof block detection cannot be precise for bullets or
-any other non well parenthesized proof structure. Error resiliency can be
-turned off or selectively activated for any set of block kind passing to
-CoqIDE one of the following options:
-\texttt{-async-proofs-tactic-error-resilience off},
-\texttt{-async-proofs-tactic-error-resilience all},
-\texttt{-async-proofs-tactic-error-resilience $blocktype_1$,..., $blocktype_n$}.
-Valid proof block types are: ``curly'', ``par'', ``indent'', ``bullet''.
-
-\section{Interactive mode}
-
-At the time of writing the only user interface supporting asynchronous proof
-processing is CoqIDE.
-
-When CoqIDE is started, two Coq processes are created. The master one follows
-the user, giving feedback as soon as possible by skipping proofs, which are
-delegated to the worker process. The worker process, whose state can be seen
-by clicking on the button in the lower right corner of the main CoqIDE window,
-asynchronously processes the proofs. If a proof contains an error, it is
-reported in red in the label of the very same button, that can also be used to
-see the list of errors and jump to the corresponding line.
-
-If a proof is processed asynchronously the corresponding \texttt{Qed} command
-is colored using a lighter color that usual. This signals that
-the proof has been delegated to a worker process (or will be processed
-lazily if the \texttt{-async-proofs lazy} option is used). Once finished, the
-worker process will provide the proof object, but this will not be
-automatically checked by the kernel of the main process. To force
-the kernel to check all the proof objects, one has to click the button
-with the gears. Only then are all the universe constraints checked.
-
-\subsubsection{Caveats}
-\label{interactivecaveats}
-
-The number of worker processes can be increased by passing CoqIDE the
-\texttt{-async-proofs-j $n$} flag. Note that the memory consumption
-increases too, since each worker requires the same amount of memory as
-the master process. Also note that increasing the number of workers may
-reduce the reactivity of the master process to user commands.
-
-To disable this feature, one can pass the \texttt{-async-proofs off} flag to
-CoqIDE. Conversely, on Windows, where the feature is disabled by default,
-pass the \texttt{-async-proofs on} flag to enable it.
-
-Proofs that are known to take little time to process are not delegated to a
-worker process. The threshold can be configure with \texttt{-async-proofs-delegation-threshold}. Default is 0.03 seconds.
-
-\section{Batch mode}
-
-When Coq is used as a batch compiler by running \texttt{coqc} or
-\texttt{coqtop -compile}, it produces a \texttt{.vo} file for each
-\texttt{.v} file. A \texttt{.vo} file contains, among other things,
-theorems statements and proofs. Hence to produce a \texttt{.vo} Coq need
-to process all the proofs of the \texttt{.v} file.
-
-The asynchronous processing of proofs can decouple the generation of a
-compiled file (like the \texttt{.vo} one) that can be loaded by
-\texttt{Require} from the generation and checking of the proof objects.
-The \texttt{-quick} flag can be passed to \texttt{coqc} or
-\texttt{coqtop} to produce, quickly, \texttt{.vio} files. Alternatively,
-when using a \texttt{Makefile} produced by \texttt{coq\_makefile}, the
-\texttt{quick} target can be used to compile all files using the
-\texttt{-quick} flag.
-
-A \texttt{.vio} file can be loaded using \texttt{Require} exactly as a
-\texttt{.vo} file but proofs will not be available (the \texttt{Print}
-command produces an error). Moreover, some universe constraints might be
-missing, so universes inconsistencies might go unnoticed. A
-\texttt{.vio} file does not contain proof objects, but proof tasks,
-i.e. what a worker process can transform into a proof object.
-
-Compiling a set of files with the \texttt{-quick} flag allows one to work,
-interactively, on any file without waiting for all the proofs to be checked.
-
-When working interactively, one can fully check all the \texttt{.v} files by
-running \texttt{coqc} as usual.
-
-Alternatively one can turn each \texttt{.vio} into the corresponding
-\texttt{.vo}. All \texttt{.vio} files can be processed in parallel,
-hence this alternative might be faster. The command \texttt{coqtop
- -schedule-vio2vo 2 a b c} can be used to obtain a good scheduling for 2
-workers to produce \texttt{a.vo}, \texttt{b.vo}, and \texttt{c.vo}. When
-using a \texttt{Makefile} produced by \texttt{coq\_makefile}, the
-\texttt{vio2vo} target can be used for that purpose. Variable \texttt{J}
-should be set to the number of workers, e.g. \texttt{make vio2vo J=2}.
-The only caveat is that, while the \texttt{.vo} files obtained from
-\texttt{.vio} files are complete (they contain all proof terms and
-universe constraints), the satisfiability of all universe constraints has
-not been checked globally (they are checked to be consistent for every
-single proof). Constraints will be checked when these \texttt{.vo} files
-are (recursively) loaded with \texttt{Require}.
-
-There is an extra, possibly even faster, alternative: just check the
-proof tasks stored in \texttt{.vio} files without producing the
-\texttt{.vo} files. This is possibly faster because all the proof tasks
-are independent, hence one can further partition the job to be done
-between workers. The \texttt{coqtop -schedule-vio-checking 6 a b c}
-command can be used to obtain a good scheduling for 6 workers to check
-all the proof tasks of \texttt{a.vio}, \texttt{b.vio}, and
-\texttt{c.vio}. Auxiliary files are used to predict how long a proof task
-will take, assuming it will take the same amount of time it took last
-time. When using a \texttt{Makefile} produced by \texttt{coq\_makefile},
-the \texttt{checkproofs} target can be used to check all \texttt{.vio}
-files. Variable \texttt{J} should be set to the number of workers,
-e.g. \texttt{make checkproofs J=6}. As when converting \texttt{.vio}
-files to \texttt{.vo} files, universe constraints are not checked to be
-globally consistent. Hence this compilation mode is only useful for quick
-regression testing and on developments not making heavy use of the $Type$
-hierarchy.
-
-\section{Limiting the number of parallel workers}
-\label{coqworkmgr}
-
-Many Coq processes may run on the same computer, and each of them may start
-many additional worker processes.
-The \texttt{coqworkmgr} utility lets one limit the number of workers, globally.
-
-The utility accepts the \texttt{-j} argument to specify the maximum number of
-workers (defaults to 2). \texttt{coqworkmgr} automatically starts in the
-background and prints an environment variable assignment like
-\texttt{COQWORKMGR\_SOCKET=localhost:45634}. The user must set this variable in
-all the shells from which Coq processes will be started. If one uses just
-one terminal running the bash shell, then \texttt{export `coqworkmgr -j 4`} will
-do the job.
-
-After that, all Coq processes, e.g. \texttt{coqide} and \texttt{coqc},
-will honor the limit, globally.
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "Reference-Manual"
-%%% End:
diff --git a/doc/refman/Reference-Manual.tex b/doc/refman/Reference-Manual.tex
index cfb3c625b..7e68dd752 100644
--- a/doc/refman/Reference-Manual.tex
+++ b/doc/refman/Reference-Manual.tex
@@ -117,7 +117,6 @@ Options A and B of the licence are {\em not} elected.}
%END LATEX
\part{Addendum to the Reference Manual}
\include{AddRefMan-pre}%
-\include{AsyncProofs}% Paral-ITP
\include{Universes.v}% Universe polymorphes
\include{Misc.v}
%BEGIN LATEX
diff --git a/doc/sphinx/addendum/parallel-proof-processing.rst b/doc/sphinx/addendum/parallel-proof-processing.rst
new file mode 100644
index 000000000..8c1b9d152
--- /dev/null
+++ b/doc/sphinx/addendum/parallel-proof-processing.rst
@@ -0,0 +1,229 @@
+.. include:: ../replaces.rst
+
+.. _asynchronousandparallelproofprocessing:
+
+Asynchronous and Parallel Proof Processing
+==========================================
+
+:Author: Enrico Tassi
+
+This chapter explains how proofs can be asynchronously processed by
+|Coq|. This feature improves the reactivity of the system when used in
+interactive mode via |CoqIDE|. In addition, it allows |Coq| to take
+advantage of parallel hardware when used as a batch compiler by
+decoupling the checking of statements and definitions from the
+construction and checking of proof objects.
+
+This feature is designed to help deal with huge libraries of
+theorems characterized by long proofs. In the current state, it may
+not be beneficial on small sets of short files.
+
+This feature has some technical limitations that may make it
+unsuitable for some use cases.
+
+For example, in interactive mode, some errors coming from the kernel
+of |Coq| are signaled late. The type of errors belonging to this
+category are universe inconsistencies.
+
+At the time of writing, only opaque proofs (ending with ``Qed`` or
+``Admitted``) can be processed asynchronously.
+
+Finally, asynchronous processing is disabled when running |CoqIDE| in
+Windows. The current implementation of the feature is not stable on
+Windows. It can be enabled, as described below at :ref:`interactive-mode`,
+though doing so is not recommended.
+
+Proof annotations
+----------------------
+
+To process a proof asynchronously |Coq| needs to know the precise
+statement of the theorem without looking at the proof. This requires
+some annotations if the theorem is proved inside a Section (see
+Section :ref:`TODO-2.4`).
+
+When a section ends, |Coq| looks at the proof object to decide which
+section variables are actually used and hence have to be quantified in
+the statement of the theorem. To avoid making the construction of
+proofs mandatory when ending a section, one can start each proof with
+the ``Proof using`` command (Section :ref:`TODO-7.1.5`) that declares which section
+variables the theorem uses.
+
+The presence of ``Proof using`` is needed to process proofs asynchronously
+in interactive mode.
+
+It is not strictly mandatory in batch mode if it is not the first time
+the file is compiled and if the file itself did not change. When the
+proof does not begin with ``Proof using``, the system records in an
+auxiliary file, produced along with the `.vo` file, the list of section
+variables used.
+
+Automatic suggestion of proof annotations
+`````````````````````````````````````````
+
+The command ``Set Suggest Proof Using`` makes |Coq| suggest, when a ``Qed``
+command is processed, a correct proof annotation. It is up to the user
+to modify the proof script accordingly.
+
+
+Proof blocks and error resilience
+--------------------------------------
+
+|Coq| 8.6 introduced a mechanism for error resiliency: in interactive
+mode |Coq| is able to completely check a document containing errors
+instead of bailing out at the first failure.
+
+Two kinds of errors are supported: errors occurring in vernacular
+commands and errors occurring in proofs.
+
+To properly recover from a failing tactic, |Coq| needs to recognize the
+structure of the proof in order to confine the error to a subproof.
+Proof block detection is performed by looking at the syntax of the
+proof script (i.e. also looking at indentation). |Coq| comes with four
+kinds of proof blocks, and an ML API to add new ones.
+
+:curly: blocks are delimited by { and }, see Chapter :ref:`proofhandling`
+:par: blocks are atomic, i.e. just one tactic introduced by the `par:`
+ goal selector
+:indent: blocks end with a tactic indented less than the previous one
+:bullet: blocks are delimited by two equal bullet signs at the same
+ indentation level
+
+Caveats
+````````
+
+When a vernacular command fails, the subsequent error messages may be
+bogus, i.e. caused by the first error. Error resiliency for vernacular
+commands can be switched off by passing ``-async-proofs-command-error-resilience off``
+to |CoqIDE|.
+
+An incorrect proof block detection can result in an incorrect error
+recovery and hence in bogus errors. Proof block detection cannot be
+precise for bullets or any other non-well-parenthesized proof
+structure. Error resiliency can be turned off or selectively activated
+for any set of block kinds by passing to |CoqIDE| one of the following
+options:
+
+- ``-async-proofs-tactic-error-resilience off``
+- ``-async-proofs-tactic-error-resilience all``
+- ``-async-proofs-tactic-error-resilience`` :n:`{*, blocktype}`
+
+Valid proof block types are: “curly”, “par”, “indent”, and “bullet”.
+
+.. _interactive-mode:
+
+Interactive mode
+---------------------
+
+At the time of writing the only user interface supporting asynchronous
+proof processing is |CoqIDE|.
+
+When |CoqIDE| is started, two |Coq| processes are created. The master one
+follows the user, giving feedback as soon as possible by skipping
+proofs, which are delegated to the worker process. The worker process,
+whose state can be seen by clicking on the button in the lower right
+corner of the main |CoqIDE| window, asynchronously processes the proofs.
+If a proof contains an error, it is reported in red in the label of
+the very same button, which can also be used to see the list of errors
+and jump to the corresponding line.
+
+If a proof is processed asynchronously, the corresponding ``Qed`` command
+is colored using a lighter color than usual. This signals that the
+proof has been delegated to a worker process (or will be processed
+lazily if the ``-async-proofs lazy`` option is used). Once finished, the
+worker process will provide the proof object, but this will not be
+automatically checked by the kernel of the main process. To force the
+kernel to check all the proof objects, one has to click the button
+with the gears. Only then are all the universe constraints checked.
+
+Caveats
+```````
+
+The number of worker processes can be increased by passing |CoqIDE|
+the ``-async-proofs-j n`` flag. Note that the memory consumption increases too,
+since each worker requires the same amount of memory as the master
+process. Also note that increasing the number of workers may reduce
+the reactivity of the master process to user commands.
+
+To disable this feature, one can pass the ``-async-proofs off`` flag to
+|CoqIDE|. Conversely, on Windows, where the feature is disabled by
+default, pass the ``-async-proofs on`` flag to enable it.
+
+Proofs that are known to take little time to process are not delegated
+to a worker process. The threshold can be configured with
+``-async-proofs-delegation-threshold``. Default is 0.03 seconds.
+
+Batch mode
+---------------
+
+When |Coq| is used as a batch compiler by running `coqc` or
+`coqtop -compile`, it produces a `.vo` file for each `.v` file. A `.vo` file contains,
+among other things, theorem statements and proofs. Hence to produce a
+`.vo`, |Coq| needs to process all the proofs of the `.v` file.
+
+The asynchronous processing of proofs can decouple the generation of a
+compiled file (like the `.vo` one) that can be loaded by ``Require`` from the
+generation and checking of the proof objects. The ``-quick`` flag can be
+passed to `coqc` or `coqtop` to produce, quickly, `.vio` files.
+Alternatively, when using a Makefile produced by `coq_makefile`,
+the ``quick`` target can be used to compile all files using the ``-quick`` flag.
+
+A `.vio` file can be loaded using ``Require`` exactly as a `.vo` file but
+proofs will not be available (the ``Print`` command produces an error).
+Moreover, some universe constraints might be missing, so universes
+inconsistencies might go unnoticed. A `.vio` file does not contain proof
+objects, but proof tasks, i.e. what a worker process can transform
+into a proof object.
+
+Compiling a set of files with the ``-quick`` flag allows one to work,
+interactively, on any file without waiting for all the proofs to be
+checked.
+
+When working interactively, one can fully check all the `.v` files by
+running `coqc` as usual.
+
+Alternatively, one can turn each `.vio` into the corresponding `.vo`. All
+`.vio` files can be processed in parallel, hence this alternative might
+be faster. The command ``coqtop -schedule-vio2vo 2 a b c`` can be used to
+obtain a good scheduling for two workers to produce `a.vo`, `b.vo`, and
+`c.vo`. When using a Makefile produced by `coq_makefile`, the ``vio2vo`` target
+can be used for that purpose. Variable `J` should be set to the number
+of workers, e.g. ``make vio2vo J=2``. The only caveat is that, while the
+`.vo` files obtained from `.vio` files are complete (they contain all proof
+terms and universe constraints), the satisfiability of all universe
+constraints has not been checked globally (they are checked to be
+consistent for every single proof). Constraints will be checked when
+these `.vo` files are (recursively) loaded with ``Require``.
+
+There is an extra, possibly even faster, alternative: just check the
+proof tasks stored in `.vio` files without producing the `.vo` files. This
+is possibly faster because all the proof tasks are independent, hence
+one can further partition the job to be done between workers. The
+``coqtop -schedule-vio-checking 6 a b c`` command can be used to obtain a
+good scheduling for 6 workers to check all the proof tasks of `a.vio`,
+`b.vio`, and `c.vio`. Auxiliary files are used to predict how long a proof
+task will take, assuming it will take the same amount of time it took
+last time. When using a Makefile produced by `coq_makefile`, the
+``checkproofs`` target can be used to check all `.vio` files. Variable `J`
+should be set to the number of workers, e.g. ``make checkproofs J=6``. As
+when converting `.vio` files to `.vo` files, universe constraints are not
+checked to be globally consistent. Hence this compilation mode is only
+useful for quick regression testing and on developments not making
+heavy use of the `Type` hierarchy.
+
+Limiting the number of parallel workers
+--------------------------------------------
+
+Many |Coq| processes may run on the same computer, and each of them may
+start many additional worker processes. The `coqworkmgr` utility lets
+one limit the number of workers, globally.
+
+The utility accepts the ``-j`` argument to specify the maximum number of
+workers (defaults to 2). `coqworkmgr` automatically starts in the
+background and prints an environment variable assignment
+like ``COQWORKMGR_SOCKET=localhost:45634``. The user must set this variable
+in all the shells from which |Coq| processes will be started. If one
+uses just one terminal running the bash shell, then
+``export `coqworkmgr -j 4` `` will do the job.
+
+After that, all |Coq| processes, e.g. `coqide` and `coqc`, will honor the
+limit, globally.
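
As a recap of the options documented in the new chapter above, the following sketch shows a CoqIDE invocation using the asynchronous-proof flags and a batch-mode workflow that compiles three hypothetical files quickly and completes the checking later. File names and the threshold value are placeholders; the commands and Makefile targets are the ones described in the text.

```
# Interactive mode: ask CoqIDE for 2 proof workers and a higher delegation
# threshold.
$ coqide -async-proofs-j 2 -async-proofs-delegation-threshold 0.1 foo.v

# Quick pass: produce .vio files without building or checking proof objects.
$ coqc -quick a.v
$ coqc -quick b.v
$ coqc -quick c.v

# Later, either turn the .vio files into full .vo files with 2 workers...
$ coqtop -schedule-vio2vo 2 a b c

# ...or only check the stored proof tasks with 6 workers (no .vo produced;
# universe constraints are still not checked globally).
$ coqtop -schedule-vio-checking 6 a b c

# With a coq_makefile-generated Makefile, the equivalent targets are:
#   make quick
#   make vio2vo J=2
#   make checkproofs J=6

# Optionally limit the total number of workers across all Coq processes
# started from this shell.
$ export `coqworkmgr -j 4`
```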
diff --git a/doc/sphinx/index.rst b/doc/sphinx/index.rst
index 75e955136..3dd4f8020 100644
--- a/doc/sphinx/index.rst
+++ b/doc/sphinx/index.rst
@@ -54,6 +54,7 @@ Table of contents
addendum/ring
addendum/nsatz
addendum/generalized-rewriting
+ addendum/parallel-proof-processing
.. toctree::
:caption: Reference
diff --git a/interp/constrextern.ml b/interp/constrextern.ml
index 48ddd9496..bb5fd5294 100644
--- a/interp/constrextern.ml
+++ b/interp/constrextern.ml
@@ -589,11 +589,17 @@ let explicitize inctx impl (cf,f) args =
let expl () =
match ip with
| Some i ->
- if not (List.is_empty impl) && is_status_implicit (List.nth impl (i-1)) then
- raise Expl
+ (* Careful: It is possible to have declared implicits ending
+ before the principal argument *)
+ let is_impl =
+ try is_status_implicit (List.nth impl (i-1))
+ with Failure _ -> false
+ in
+ if is_impl
+ then raise Expl
else
let (args1,args2) = List.chop i args in
- let (impl1,impl2) = if List.is_empty impl then [],[] else List.chop i impl in
+ let (impl1,impl2) = try List.chop i impl with Failure _ -> impl, [] in
let args1 = exprec 1 (args1,impl1) in
let args2 = exprec (i+1) (args2,impl2) in
let ip = Some (List.length args1) in
diff --git a/intf/vernacexpr.ml b/intf/vernacexpr.ml
index 0e84a07aa..06f969f19 100644
--- a/intf/vernacexpr.ml
+++ b/intf/vernacexpr.ml
@@ -297,13 +297,9 @@ type inline =
type module_ast_inl = module_ast * inline
type module_binder = bool option * lident list * module_ast_inl
-(** Cumulativity can be set globally, locally or unset locally and it
- can not enabled at all. *)
-type cumulative_inductive_parsing_flag =
- | GlobalCumulativity
- | GlobalNonCumulativity
- | LocalCumulativity
- | LocalNonCumulativity
+(** [Some VernacCumulative] / [Some VernacNonCumulative] if cumulativity is
+    locally enabled/disabled, [None] if we should use the global flag. *)
+type vernac_cumulative = VernacCumulative | VernacNonCumulative
(** {6 The type of vernacular expressions} *)
@@ -338,7 +334,7 @@ type nonrec vernac_expr =
| VernacExactProof of constr_expr
| VernacAssumption of (Decl_kinds.discharge * Decl_kinds.assumption_object_kind) *
inline * (ident_decl list * constr_expr) with_coercion list
- | VernacInductive of cumulative_inductive_parsing_flag * Decl_kinds.private_flag * inductive_flag * (inductive_expr * decl_notation list) list
+ | VernacInductive of vernac_cumulative option * Decl_kinds.private_flag * inductive_flag * (inductive_expr * decl_notation list) list
| VernacFixpoint of Decl_kinds.discharge * (fixpoint_expr * decl_notation list) list
| VernacCoFixpoint of Decl_kinds.discharge * (cofixpoint_expr * decl_notation list) list
| VernacScheme of (lident option * scheme) list
@@ -501,14 +497,13 @@ type vernac_type =
| VtProofMode of string
(* Queries are commands assumed to be "pure", that is to say, they
don't modify the interpretation state. *)
- | VtQuery of vernac_part_of_script * Feedback.route_id
+ | VtQuery
(* To be removed *)
| VtMeta
| VtUnknown
and vernac_qed_type = VtKeep | VtKeepAsAxiom | VtDrop (* Qed/Admitted, Abort *)
and vernac_start = string * opacity_guarantee * Id.t list
and vernac_sideff_type = Id.t list
-and vernac_part_of_script = bool
and opacity_guarantee =
| GuaranteesOpacity (** Only generates opaque terms at [Qed] *)
| Doesn'tGuaranteeOpacity (** May generate transparent terms even with [Qed].*)
diff --git a/parsing/g_vernac.ml4 b/parsing/g_vernac.ml4
index 8b445985b..593dcbf58 100644
--- a/parsing/g_vernac.ml4
+++ b/parsing/g_vernac.ml4
@@ -161,20 +161,10 @@ GEXTEND Gram
| IDENT "Let"; id = identref; b = def_body ->
VernacDefinition ((DoDischarge, Let), (lname_of_lident id, None), b)
(* Gallina inductive declarations *)
- | cum = cumulativity_token; priv = private_token; f = finite_token;
+ | cum = OPT cumulativity_token; priv = private_token; f = finite_token;
indl = LIST1 inductive_definition SEP "with" ->
let (k,f) = f in
let indl=List.map (fun ((a,b,c,d),e) -> ((a,b,c,k,d),e)) indl in
- let cum =
- match cum with
- Some true -> LocalCumulativity
- | Some false -> LocalNonCumulativity
- | None ->
- if Flags.is_polymorphic_inductive_cumulativity () then
- GlobalCumulativity
- else
- GlobalNonCumulativity
- in
VernacInductive (cum, priv,f,indl)
| "Fixpoint"; recs = LIST1 rec_definition SEP "with" ->
VernacFixpoint (NoDischarge, recs)
@@ -257,7 +247,8 @@ GEXTEND Gram
| IDENT "Class" -> (Class true,BiFinite) ] ]
;
cumulativity_token:
- [ [ IDENT "Cumulative" -> Some true | IDENT "NonCumulative" -> Some false | -> None ] ]
+ [ [ IDENT "Cumulative" -> VernacCumulative
+ | IDENT "NonCumulative" -> VernacNonCumulative ] ]
;
private_token:
[ [ IDENT "Private" -> true | -> false ] ]
diff --git a/plugins/funind/glob_term_to_relation.ml b/plugins/funind/glob_term_to_relation.ml
index 49f7aae43..319b410df 100644
--- a/plugins/funind/glob_term_to_relation.ml
+++ b/plugins/funind/glob_term_to_relation.ml
@@ -1512,7 +1512,7 @@ let do_build_inductive
in
let msg =
str "while trying to define"++ spc () ++
- Ppvernac.pr_vernac Vernacexpr.(VernacExpr([], VernacInductive(GlobalNonCumulativity,false,Declarations.Finite,repacked_rel_inds)))
+ Ppvernac.pr_vernac Vernacexpr.(VernacExpr([], VernacInductive(None,false,Declarations.Finite,repacked_rel_inds)))
++ fnl () ++
msg
in
@@ -1527,7 +1527,7 @@ let do_build_inductive
in
let msg =
str "while trying to define"++ spc () ++
- Ppvernac.pr_vernac Vernacexpr.(VernacExpr([], VernacInductive(GlobalNonCumulativity,false,Declarations.Finite,repacked_rel_inds)))
+ Ppvernac.pr_vernac Vernacexpr.(VernacExpr([], VernacInductive(None,false,Declarations.Finite,repacked_rel_inds)))
++ fnl () ++
CErrors.print reraise
in
diff --git a/printing/ppvernac.ml b/printing/ppvernac.ml
index bbf0e2b60..7eb8396ac 100644
--- a/printing/ppvernac.ml
+++ b/printing/ppvernac.ml
@@ -767,8 +767,9 @@ open Decl_kinds
if p then
let cm =
match cum with
- | GlobalCumulativity | LocalCumulativity -> "Cumulative"
- | GlobalNonCumulativity | LocalNonCumulativity -> "NonCumulative"
+ | Some VernacCumulative -> "Cumulative"
+ | Some VernacNonCumulative -> "NonCumulative"
+ | None -> ""
in
cm ^ " " ^ kind
else kind
diff --git a/stm/stm.ml b/stm/stm.ml
index a0305efee..ba0a2017a 100644
--- a/stm/stm.ml
+++ b/stm/stm.ml
@@ -2782,16 +2782,9 @@ let process_transaction ?(newtip=Stateid.fresh ())
| VtMeta, _ ->
let id, w = Backtrack.undo_vernac_classifier expr in
process_back_meta_command ~newtip ~head id x w
+
(* Query *)
- | VtQuery (false,route), VtNow ->
- let query_sid = VCS.cur_tip () in
- (try
- let st = Vernacstate.freeze_interp_state `No in
- ignore(stm_vernac_interp ~route query_sid st x)
- with e ->
- let e = CErrors.push e in
- Exninfo.iraise (State.exn_on ~valid:Stateid.dummy query_sid e)); `Ok
- | VtQuery (true, route), w ->
+ | VtQuery, w ->
let id = VCS.new_node ~id:newtip () in
let queue =
if !cur_opt.async_proofs_full then `QueryQueue (ref false)
@@ -2803,9 +2796,6 @@ let process_transaction ?(newtip=Stateid.fresh ())
VCS.commit id (mkTransCmd x [] false queue);
Backtrack.record (); if w == VtNow then ignore(finish ~doc:dummy_doc); `Ok
- | VtQuery (false,_), VtLater ->
- anomaly(str"classifier: VtQuery + VtLater must imply part_of_script.")
-
(* Proof *)
| VtStartProof (mode, guarantee, names), w ->
let id = VCS.new_node ~id:newtip () in
@@ -3048,8 +3038,9 @@ let query ~doc ~at ~route s =
let { CAst.loc; v=ast } = parse_sentence ~doc at s in
let indentation, strlen = compute_indentation ?loc at in
CWarnings.set_current_loc loc;
+ let st = State.get_cached at in
let aast = { verbose = true; indentation; strlen; loc; expr = ast } in
- ignore(process_transaction aast (VtQuery (false,route), VtNow))
+ ignore(stm_vernac_interp ~route at st aast)
done;
with
| End_of_input -> ()
diff --git a/stm/vernac_classifier.ml b/stm/vernac_classifier.ml
index eecee40df..c08cc6e36 100644
--- a/stm/vernac_classifier.ml
+++ b/stm/vernac_classifier.ml
@@ -16,8 +16,6 @@ open Vernacexpr
let default_proof_mode () = Proof_global.get_default_proof_mode_name () [@ocaml.warning "-3"]
-let string_of_in_script b = if b then " (inside script)" else ""
-
let string_of_parallel = function
| `Yes (solve,abs) ->
"par" ^ if solve then "solve" else "" ^ if abs then "abs" else ""
@@ -34,7 +32,7 @@ let string_of_vernac_type = function
"ProofStep " ^ string_of_parallel parallel ^
Option.default "" proof_block_detection
| VtProofMode s -> "ProofMode " ^ s
- | VtQuery (b, route) -> "Query " ^ string_of_in_script b ^ " route " ^ string_of_int route
+ | VtQuery -> "Query"
| VtMeta -> "Meta "
let string_of_vernac_when = function
@@ -70,7 +68,7 @@ let classify_vernac e =
| VernacEndProof _ | VernacExactProof _ -> VtQed VtKeep, VtLater
(* Query *)
| VernacShow _ | VernacPrint _ | VernacSearch _ | VernacLocate _
- | VernacCheckMayEval _ -> VtQuery (true,Feedback.default_route), VtLater
+ | VernacCheckMayEval _ -> VtQuery, VtLater
(* ProofStep *)
| VernacProof _
| VernacFocus _ | VernacUnfocus
@@ -205,7 +203,7 @@ let classify_vernac e =
static_control_classifier ~poly e
| VernacFail e -> (* Fail Qed or Fail Lemma must not join/fork the DAG *)
(match static_control_classifier ~poly e with
- | ( VtQuery _ | VtProofStep _ | VtSideff _
+ | ( VtQuery | VtProofStep _ | VtSideff _
| VtProofMode _ | VtMeta), _ as x -> x
| VtQed _, _ ->
VtProofStep { parallel = `No; proof_block_detection = None },
@@ -214,6 +212,6 @@ let classify_vernac e =
in
static_control_classifier ~poly:(Flags.is_universe_polymorphism ()) e
-let classify_as_query = VtQuery (true,Feedback.default_route), VtLater
+let classify_as_query = VtQuery, VtLater
let classify_as_sideeff = VtSideff [], VtLater
let classify_as_proofstep = VtProofStep { parallel = `No; proof_block_detection = None}, VtLater
diff --git a/test-suite/output/Projections.out b/test-suite/output/Projections.out
new file mode 100644
index 000000000..e9c28faf1
--- /dev/null
+++ b/test-suite/output/Projections.out
@@ -0,0 +1,2 @@
+fun S : store => S.(store_funcs)
+ : store -> host_func
diff --git a/test-suite/output/Projections.v b/test-suite/output/Projections.v
new file mode 100644
index 000000000..098a518dc
--- /dev/null
+++ b/test-suite/output/Projections.v
@@ -0,0 +1,11 @@
+
+Set Printing Projections.
+
+Class HostFunction := host_func : Type.
+
+Section store.
+ Context `{HostFunction}.
+ Record store := { store_funcs : host_func }.
+End store.
+
+Check (fun (S:@store nat) => S.(store_funcs)).
diff --git a/vernac/vernacentries.ml b/vernac/vernacentries.ml
index 0e8ca2289..b44c7cccb 100644
--- a/vernac/vernacentries.ml
+++ b/vernac/vernacentries.ml
@@ -534,17 +534,14 @@ let vernac_assumption ~atts discharge kind l nl =
if not status then Feedback.feedback Feedback.AddedAxiom
let should_treat_as_cumulative cum poly =
- if poly then
- match cum with
- | GlobalCumulativity | LocalCumulativity -> true
- | GlobalNonCumulativity | LocalNonCumulativity -> false
- else
- match cum with
- | GlobalCumulativity | GlobalNonCumulativity -> false
- | LocalCumulativity ->
- user_err Pp.(str "The Cumulative prefix can only be used in a polymorphic context.")
- | LocalNonCumulativity ->
- user_err Pp.(str "The NonCumulative prefix can only be used in a polymorphic context.")
+ match cum with
+ | Some VernacCumulative ->
+ if poly then true
+ else user_err Pp.(str "The Cumulative prefix can only be used in a polymorphic context.")
+ | Some VernacNonCumulative ->
+ if poly then false
+ else user_err Pp.(str "The NonCumulative prefix can only be used in a polymorphic context.")
+ | None -> poly && Flags.is_polymorphic_inductive_cumulativity ()
let vernac_record cum k poly finite struc binders sort nameopt cfs =
let is_cumulative = should_treat_as_cumulative cum poly in
@@ -565,7 +562,6 @@ let vernac_record cum k poly finite struc binders sort nameopt cfs =
indicates whether the type is inductive, co-inductive or
neither. *)
let vernac_inductive ~atts cum lo finite indl =
- let is_cumulative = should_treat_as_cumulative cum atts.polymorphic in
if Dumpglob.dump () then
List.iter (fun (((coe,(lid,_)), _, _, _, cstrs), _) ->
match cstrs with
@@ -602,6 +598,7 @@ let vernac_inductive ~atts cum lo finite indl =
| _ -> user_err Pp.(str "Cannot handle mutually (co)inductive records.")
in
let indl = List.map unpack indl in
+ let is_cumulative = should_treat_as_cumulative cum atts.polymorphic in
ComInductive.do_mutual_inductive indl is_cumulative atts.polymorphic lo finite
let vernac_fixpoint ~atts discharge l =