-rwxr-xr-x  Binaries/dafny  16
-rw-r--r--  INSTALL  30
-rw-r--r--  Source/Dafny/Cloner.cs  7
-rw-r--r--  Source/Dafny/Compiler.cs  8
-rw-r--r--  Source/Dafny/Dafny.atg  4
-rw-r--r--  Source/Dafny/DafnyAst.cs  71
-rw-r--r--  Source/Dafny/DafnyOptions.cs  2
-rw-r--r--  Source/Dafny/Parser.cs  6
-rw-r--r--  Source/Dafny/Printer.cs  44
-rw-r--r--  Source/Dafny/Resolver.cs  309
-rw-r--r--  Source/Dafny/Rewriter.cs  341
-rw-r--r--  Source/Dafny/Translator.cs  585
-rw-r--r--  Source/DafnyServer/DafnyHelper.cs  2
-rw-r--r--  Source/DafnyServer/Utilities.cs  2
-rw-r--r--  Test/VerifyThis2015/Problem1.dfy  8
-rw-r--r--  Test/dafny0/InductivePredicates.dfy  41
-rw-r--r--  Test/dafny0/InductivePredicates.dfy.expect  6
-rw-r--r--  Test/dafny0/Parallel.dfy  146
-rw-r--r--  Test/dafny3/Filter.dfy  238
-rw-r--r--  Test/dafny3/Filter.dfy.expect  2
-rw-r--r--  Test/dafny3/GenericSort.dfy  56
-rw-r--r--  Test/dafny3/InfiniteTrees.dfy  44
-rw-r--r--  Test/dafny4/ACL2-extractor.dfy  8
-rw-r--r--  Test/dafny4/KozenSilva.dfy  80
-rw-r--r--  Test/dafny4/KozenSilva.dfy.expect  2
-rw-r--r--  Test/dafny4/NipkowKlein-chapter7.dfy  46
-rw-r--r--  Test/dafny4/NumberRepresentations.dfy  13
-rw-r--r--  Test/vstte2012/Tree.dfy  32
28 files changed, 1118 insertions, 1031 deletions
diff --git a/Binaries/dafny b/Binaries/dafny
new file mode 100755
index 00000000..b8571bec
--- /dev/null
+++ b/Binaries/dafny
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+MONO=$(which mono)
+DAFNY=$(dirname "${BASH_SOURCE[0]}")/Dafny.exe
+
+if [[ ! -x "$MONO" ]]; then
+ echo "Error: Dafny requires Mono to run on non-Windows systems."
+ exit 1
+fi
+
+if [[ ! -x "$DAFNY" ]]; then
+ echo "Error: Dafny.exe not found at $DAFNY."
+ exit 1
+fi
+
+"$MONO" "$DAFNY"
diff --git a/INSTALL b/INSTALL
index 830a89f4..dbc63410 100644
--- a/INSTALL
+++ b/INSTALL
@@ -9,8 +9,8 @@ Building on Linux
1. Create an empty base directory
- mkdir BASE-DRIECTORY
- cd BASE-DRIECTORY
+ mkdir BASE-DIRECTORY
+ cd BASE-DIRECTORY
2. Download and build Boogie:
@@ -25,8 +25,8 @@ Building on Linux
3. Download and build Dafny:
hg clone https://hg.codeplex.com/dafny
- xbuild dafny/Source/Dafny.sln
- xbuild dafny/Source/DafnyServer.sln
+ cd dafny/Source/
+ xbuild Dafny.sln
4. Download and build Z3
@@ -38,9 +38,23 @@ Building on Linux
make -j4
cd ../..
-5. Copy (or symlink) the z3 binary so that Dafny and Boogie can find it:
+5. Copy (or symlink) the z3 binary so that Dafny can find it:
cd BASE-DIRECTORY
- rm dafny/Binaries/z3 # Dafny already packages z3
- cp z3/build/z3 dafny/Binaries/z3
- cp z3/build/z3 boogie/Binaries/z3.exe
+ rm -f dafny/Binaries/z3
+ rm -f boogie/Binaries/z3.exe
+ ln -s /usr/bin/z3 dafny/Binaries/z3
+ ln -s /usr/bin/z3 boogie/Binaries/z3.exe
+
+6. Run Dafny using the `dafny` shell script in the Binaries directory.
+
+Editing in Emacs
+================
+
+The README at https://github.com/boogie-org/boogie-friends has plenty of
+information on how to set up Emacs to work with Dafny. In short, it boils down
+to running [M-x package-install RET boogie-friends RET] and adding the following
+to your .emacs:
+ (setq flycheck-dafny-executable "BASE-DIRECTORY/dafny/Binaries/dafny")
+
+Do look at the README, though! It's full of useful tips.
diff --git a/Source/Dafny/Cloner.cs b/Source/Dafny/Cloner.cs
index e89a385f..dd2eed69 100644
--- a/Source/Dafny/Cloner.cs
+++ b/Source/Dafny/Cloner.cs
@@ -4,7 +4,6 @@ using System.Collections.Generic;
using System.Numerics;
using System.Diagnostics.Contracts;
using IToken = Microsoft.Boogie.IToken;
-using System.Linq;
namespace Microsoft.Dafny
{
@@ -229,6 +228,9 @@ namespace Microsoft.Dafny
public Attributes CloneAttributes(Attributes attrs) {
if (attrs == null) {
return null;
+ } else if (attrs.Name.StartsWith("_")) {
+ // skip this attribute, since it would have been produced during resolution
+ return CloneAttributes(attrs.Prev);
} else {
return new Attributes(attrs.Name, attrs.Args.ConvertAll(CloneExpr), CloneAttributes(attrs.Prev));
}
@@ -366,7 +368,6 @@ namespace Microsoft.Dafny
return new NamedExpr(Tok(e.tok), e.Name, CloneExpr(e.Body));
} else if (expr is ComprehensionExpr) {
var e = (ComprehensionExpr)expr;
-
var tk = Tok(e.tok);
var bvs = e.BoundVars.ConvertAll(CloneBoundVar);
var range = CloneExpr(e.Range);
@@ -830,7 +831,7 @@ namespace Microsoft.Dafny
apply.Args.ForEach(arg => args.Add(CloneExpr(arg)));
var applyClone = new ApplySuffix(Tok(apply.tok), lhsClone, args);
var c = new ExprRhs(applyClone);
- reporter.Info(MessageSource.Cloner, apply.tok, mse.Member.Name);
+ reporter.Info(MessageSource.Cloner, apply.Lhs.tok, mse.Member.Name);
return c;
}
}
diff --git a/Source/Dafny/Compiler.cs b/Source/Dafny/Compiler.cs
index c969ac1f..c94f7836 100644
--- a/Source/Dafny/Compiler.cs
+++ b/Source/Dafny/Compiler.cs
@@ -1201,13 +1201,7 @@ namespace Microsoft.Dafny {
Contract.Requires(cce.NonNullElements(targs));
Contract.Ensures(Contract.Result<string>() != null);
- string s = "";
- string sep = "";
- foreach (TypeParameter tp in targs) {
- s += sep + "@" + tp.CompileName;
- sep = ",";
- }
- return s;
+ return Util.Comma(targs, tp => "@" + tp.CompileName);
}
string DefaultValue(Type type)
diff --git a/Source/Dafny/Dafny.atg b/Source/Dafny/Dafny.atg
index 07f8e1c4..954448af 100644
--- a/Source/Dafny/Dafny.atg
+++ b/Source/Dafny/Dafny.atg
@@ -2915,10 +2915,10 @@ Expressions<.List<Expression> args.>
.
/*------------------------------------------------------------------------*/
Attribute<ref Attributes attrs>
-= (. string name;
+= (. IToken x; string name;
var args = new List<Expression>();
.)
- "{" ":" ident (. name = t.val; .)
+ "{" ":" NoUSIdent<out x> (. name = x.val; .)
[ Expressions<args> ]
"}"
(. attrs = new Attributes(name, args, attrs); .)
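
The grammar change above (mirrored in the generated Parser.cs below) requires user-written
attribute names to be NoUSIdent, that is, identifiers that do not start with an underscore;
underscore-prefixed names remain reserved for attributes produced during resolution, such as
:_induction. A hedged Dafny sketch of the user-facing effect (the lemma names and bodies here
are invented for illustration, not taken from this patch):

  lemma {:induction n} DoubleNonNegative(n: nat)   // accepted: 'induction' is a NoUSIdent
    ensures 0 <= n + n
  {
  }

  // lemma {:_induction n} Internal(n: nat) { }    // would now be rejected: names starting
  //                                               // with '_' are reserved for the tool
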
diff --git a/Source/Dafny/DafnyAst.cs b/Source/Dafny/DafnyAst.cs
index e5d8250b..ffd6df63 100644
--- a/Source/Dafny/DafnyAst.cs
+++ b/Source/Dafny/DafnyAst.cs
@@ -2961,19 +2961,6 @@ namespace Microsoft.Dafny {
}
}
- /// <summary>
- /// A "ThisSurrogate" is used during translation time to make the treatment of the receiver more similar to
- /// the treatment of other in-parameters.
- /// </summary>
- public class ThisSurrogate : Formal
- {
- public ThisSurrogate(IToken tok, Type type)
- : base(tok, "this", type, true, false) {
- Contract.Requires(tok != null);
- Contract.Requires(type != null);
- }
- }
-
public class BoundVar : NonglobalVariable {
public override bool IsMutable {
get {
@@ -3947,6 +3934,9 @@ namespace Microsoft.Dafny {
public override bool IsFinite {
get { return false; }
}
+ public override int Preference() {
+ return 0;
+ }
}
/// <summary>
@@ -5758,6 +5748,16 @@ namespace Microsoft.Dafny {
Contract.Requires(name != null);
Name = name;
}
+ /// <summary>
+ /// Constructs a resolved IdentifierExpr.
+ /// </summary>
+ public IdentifierExpr(IVariable v)
+ : base(v.Tok) {
+ Contract.Requires(v != null);
+ Name = v.Name;
+ Var = v;
+ Type = v.Type;
+ }
}
/// <summary>
@@ -6692,9 +6692,33 @@ namespace Microsoft.Dafny {
public virtual bool IsFinite {
get { return true; } // most bounds are finite
}
+ public abstract int Preference(); // higher is better
+
+ public static BoundedPool GetBest(List<BoundedPool> bounds) {
+ Contract.Requires(bounds != null && bounds.Count > 0);
+ var ret = bounds[0];
+ foreach (var bound in bounds) {
+ if (bound.Preference() > ret.Preference()) {
+ ret = bound;
+ } else {
+ var retInt = ret as ComprehensionExpr.IntBoundedPool;
+ if (retInt != null && (retInt.LowerBound == null || retInt.UpperBound == null)) {
+ var boundInt = bound as ComprehensionExpr.IntBoundedPool;
+ if (boundInt != null) {
+ ret = new ComprehensionExpr.IntBoundedPool(retInt.LowerBound ?? boundInt.LowerBound,
+ retInt.UpperBound ?? boundInt.UpperBound);
+ }
+ }
+ }
+ }
+ return ret;
+ }
}
public class BoolBoundedPool : BoundedPool
{
+ public override int Preference() {
+ return 5;
+ }
}
public class IntBoundedPool : BoundedPool
{
@@ -6709,36 +6733,57 @@ namespace Microsoft.Dafny {
return LowerBound != null && UpperBound != null;
}
}
+ public override int Preference() {
+ return 1;
+ }
}
public class SetBoundedPool : BoundedPool
{
public readonly Expression Set;
public SetBoundedPool(Expression set) { Set = set; }
+ public override int Preference() {
+ return 10;
+ }
}
public class SubSetBoundedPool : BoundedPool
{
public readonly Expression UpperBound;
public SubSetBoundedPool(Expression set) { UpperBound = set; }
+ public override int Preference() {
+ return 1;
+ }
}
public class SuperSetBoundedPool : BoundedPool
{
public readonly Expression LowerBound;
public SuperSetBoundedPool(Expression set) { LowerBound = set; }
+ public override int Preference() {
+ return 0;
+ }
}
public class MapBoundedPool : BoundedPool
{
public readonly Expression Map;
public MapBoundedPool(Expression map) { Map = map; }
+ public override int Preference() {
+ return 10;
+ }
}
public class SeqBoundedPool : BoundedPool
{
public readonly Expression Seq;
public SeqBoundedPool(Expression seq) { Seq = seq; }
+ public override int Preference() {
+ return 10;
+ }
}
public class DatatypeBoundedPool : BoundedPool
{
public readonly DatatypeDecl Decl;
public DatatypeBoundedPool(DatatypeDecl d) { Decl = d; }
+ public override int Preference() {
+ return 5;
+ }
}
public List<BoundedPool> Bounds; // initialized and filled in by resolver
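
The Preference/GetBest machinery added above lets the resolver rank the bounded pools it
discovers for a bound variable instead of simply taking the first one, and it merges two
partial integer ranges (one supplying only a lower bound, the other only an upper bound)
into a single IntBoundedPool. As a hedged illustration (this method is invented here, not
part of the patch), a compiled comprehension whose range yields both a set bound and an
integer range now gets the set bound, which is typically cheaper to enumerate:

  method FilterSmall(s: set<int>) returns (r: set<int>)
  {
    // 'x in s' yields a SetBoundedPool (preference 10) and '0 <= x < 100' an
    // IntBoundedPool (preference 1); GetBest picks the set bound for compilation
    r := set x | x in s && 0 <= x < 100;
  }
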
diff --git a/Source/Dafny/DafnyOptions.cs b/Source/Dafny/DafnyOptions.cs
index 245632ad..1986ea6d 100644
--- a/Source/Dafny/DafnyOptions.cs
+++ b/Source/Dafny/DafnyOptions.cs
@@ -313,7 +313,7 @@ namespace Microsoft.Dafny
2 - apply induction as requested (by attributes) and also
for heuristically chosen quantifiers
3 (default) - apply induction as requested, and for
- heuristically chosen quantifiers and ghost methods
+ heuristically chosen quantifiers and lemmas
/inductionHeuristic:<n>
0 - least discriminating induction heuristic (that is, lean
toward applying induction more often)
diff --git a/Source/Dafny/Parser.cs b/Source/Dafny/Parser.cs
index 2507cacc..7dafb572 100644
--- a/Source/Dafny/Parser.cs
+++ b/Source/Dafny/Parser.cs
@@ -1033,13 +1033,13 @@ bool IsType(ref IToken pt) {
}
void Attribute(ref Attributes attrs) {
- string name;
+ IToken x; string name;
var args = new List<Expression>();
Expect(45);
Expect(21);
- Expect(1);
- name = t.val;
+ NoUSIdent(out x);
+ name = x.val;
if (StartOf(7)) {
Expressions(args);
}
diff --git a/Source/Dafny/Printer.cs b/Source/Dafny/Printer.cs
index 971fe867..ce8b54bb 100644
--- a/Source/Dafny/Printer.cs
+++ b/Source/Dafny/Printer.cs
@@ -109,6 +109,15 @@ namespace Microsoft.Dafny {
}
}
+ public static string OneAttributeToString(Attributes a, string nameSubstitution = null) {
+ Contract.Requires(a != null);
+ using (var wr = new System.IO.StringWriter()) {
+ var pr = new Printer(wr);
+ pr.PrintOneAttribute(a, nameSubstitution);
+ return ToStringWithoutNewline(wr);
+ }
+ }
+
public static string ToStringWithoutNewline(System.IO.StringWriter wr) {
Contract.Requires(wr != null);
var sb = wr.GetStringBuilder();
@@ -429,15 +438,20 @@ namespace Microsoft.Dafny {
public void PrintAttributes(Attributes a) {
if (a != null) {
PrintAttributes(a.Prev);
-
- wr.Write(" {{:{0}", a.Name);
- if (a.Args != null)
- {
- PrintAttributeArgs(a.Args, false);
- }
- wr.Write("}");
+ wr.Write(" ");
+ PrintOneAttribute(a);
}
}
+ public void PrintOneAttribute(Attributes a, string nameSubstitution = null) {
+ Contract.Requires(a != null);
+ var name = nameSubstitution ?? a.Name;
+ var usAttribute = name.StartsWith("_");
+ wr.Write("{1}{{:{0}", name, usAttribute ? "/*" : "");
+ if (a.Args != null) {
+ PrintAttributeArgs(a.Args, false);
+ }
+ wr.Write("}}{0}", usAttribute ? "*/" : "");
+ }
public void PrintAttributeArgs(List<Expression> args, bool isFollowedBySemicolon) {
Contract.Requires(args != null);
@@ -1430,7 +1444,7 @@ namespace Microsoft.Dafny {
bool parensNeeded = ParensNeeded(opBindingStrength, contextBindingStrength, fragileContext);
if (parensNeeded) { wr.Write("("); }
- PrintExpr(e.Seq, 0x00, false, false, !parensNeeded && isFollowedBySemicolon, indent); // BOGUS: fix me
+ PrintExpr(e.Seq, opBindingStrength, false, false, !parensNeeded && isFollowedBySemicolon, indent);
wr.Write("[");
if (e.SelectOne) {
Contract.Assert( e.E0 != null);
@@ -1454,7 +1468,7 @@ namespace Microsoft.Dafny {
bool parensNeeded = ParensNeeded(opBindingStrength, contextBindingStrength, fragileContext);
if (parensNeeded) { wr.Write("("); }
- PrintExpr(e.Array, 0x00, false, false, !parensNeeded && isFollowedBySemicolon, indent); // BOGUS: fix me
+ PrintExpr(e.Array, opBindingStrength, false, false, !parensNeeded && isFollowedBySemicolon, indent);
string prefix = "[";
foreach (Expression idx in e.Indices) {
Contract.Assert(idx != null);
@@ -1478,7 +1492,7 @@ namespace Microsoft.Dafny {
bool parensNeeded = ParensNeeded(opBindingStrength, contextBindingStrength, fragileContext);
if (parensNeeded) { wr.Write("("); }
- PrintExpr(e.Seq, 00, false, false, !parensNeeded && isFollowedBySemicolon, indent); // BOGUS: fix me
+ PrintExpr(e.Seq, opBindingStrength, false, false, !parensNeeded && isFollowedBySemicolon, indent);
wr.Write("[");
PrintExpression(e.Index, false);
wr.Write(" := ");
@@ -1797,9 +1811,12 @@ namespace Microsoft.Dafny {
} else if (expr is LambdaExpr) {
var e = (LambdaExpr)expr;
- wr.Write("(");
- wr.Write(Util.Comma(e.BoundVars, bv => bv.DisplayName + ":" + bv.Type));
- wr.Write(")");
+ bool parensNeeded = !isRightmost;
+ if (parensNeeded) { wr.Write("("); }
+ var skipSignatureParens = e.BoundVars.Count == 1 && e.BoundVars[0].Type is InferredTypeProxy;
+ if (!skipSignatureParens) { wr.Write("("); }
+ wr.Write(Util.Comma(", ", e.BoundVars, bv => bv.DisplayName + (bv.Type is InferredTypeProxy ? "" : ": " + bv.Type)));
+ if (!skipSignatureParens) { wr.Write(")"); }
if (e.Range != null) {
wr.Write(" requires ");
PrintExpression(e.Range, false);
@@ -1810,6 +1827,7 @@ namespace Microsoft.Dafny {
}
wr.Write(e.OneShot ? " -> " : " => ");
PrintExpression(e.Body, isFollowedBySemicolon);
+ if (parensNeeded) { wr.Write(")"); }
} else if (expr is WildcardExpr) {
wr.Write("*");
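
PrintOneAttribute wraps underscore-prefixed (generated) attribute names in a comment so that
pretty-printed programs stay parseable, and the new OneAttributeToString is what the induction
rewriter below uses to build its hover text. Roughly, and as a hedged illustration rather than
captured tool output, a lemma that carries both a user-written attribute and the generated
:_induction attribute would pretty-print along these lines (the lemma P is invented here):

  lemma {:induction n} /*{:_induction n}*/ P(n: nat)
    ensures 0 <= n
  {
  }
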
diff --git a/Source/Dafny/Resolver.cs b/Source/Dafny/Resolver.cs
index 34d4e5c7..ca667ae0 100644
--- a/Source/Dafny/Resolver.cs
+++ b/Source/Dafny/Resolver.cs
@@ -237,10 +237,12 @@ namespace Microsoft.Dafny
rewriters.Add(new TimeLimitRewriter(reporter));
if (DafnyOptions.O.AutoTriggers) {
- rewriters.Add(new QuantifierSplittingRewriter(this.reporter));
- rewriters.Add(new TriggerGeneratingRewriter(this.reporter));
+ rewriters.Add(new QuantifierSplittingRewriter(reporter));
+ rewriters.Add(new TriggerGeneratingRewriter(reporter));
}
+ rewriters.Add(new InductionRewriter(reporter));
+
systemNameInfo = RegisterTopLevelDecls(prog.BuiltIns.SystemModule, false);
prog.CompileModules.Add(prog.BuiltIns.SystemModule);
@@ -481,14 +483,7 @@ namespace Microsoft.Dafny
showIt = ((Method)m).IsRecursive;
}
if (showIt) {
- s += "decreases ";
- if (m.Decreases.Expressions.Count != 0) {
- string sep = "";
- foreach (var d in m.Decreases.Expressions) {
- s += sep + Printer.ExprToString(d);
- sep = ", ";
- }
- }
+ s += "decreases " + Util.Comma(", ", m.Decreases.Expressions, Printer.ExprToString);
// Note, in the following line, we use the location information for "clbl", not "m". These
// are the same, except in the case where "clbl" is a CoLemma and "m" is a prefix lemma.
reporter.Info(MessageSource.Resolver, clbl.Tok, s);
@@ -5102,15 +5097,7 @@ namespace Microsoft.Dafny
ResolveStatement(s.Body, bodyMustBeSpecOnly, codeContext);
loopStack.RemoveAt(loopStack.Count - 1); // pop
} else {
- string text = "havoc {";
- if (fvs.Count != 0) {
- string sep = "";
- foreach (var fv in fvs) {
- text += sep + fv.Name;
- sep = ", ";
- }
- }
- text += "};"; // always terminate with a semi-colon
+ string text = "havoc {" + Util.Comma(", ", fvs, fv => fv.Name) + "};"; // always terminate with a semi-colon
reporter.Info(MessageSource.Resolver, s.Tok, text);
}
@@ -5209,7 +5196,7 @@ namespace Microsoft.Dafny
// add the conclusion of the calc as a free postcondition
var result = ((CalcStmt)s0).Result;
s.Ens.Add(new MaybeFreeExpression(result, true));
- reporter.Info(MessageSource.Resolver, s.Tok, "ensures " + Printer.ExprToString(result) + ";");
+ reporter.Info(MessageSource.Resolver, s.Tok, "ensures " + Printer.ExprToString(result));
} else {
s.Kind = ForallStmt.ParBodyKind.Proof;
if (s.Body is BlockStmt && ((BlockStmt)s.Body).Body.Count == 0) {
@@ -5408,7 +5395,7 @@ namespace Microsoft.Dafny
}
if (dtd != null && memberNamesUsed.Count != dtd.Ctors.Count) {
// We could complain about the syntactic omission of constructors:
- // Error(stmt, "match statement does not cover all constructors");
+ // reporter.Error(MessageSource.Resolver, stmt, "match statement does not cover all constructors");
// but instead we let the verifier do a semantic check.
// So, for now, record the missing constructors:
foreach (var ctr in dtd.Ctors) {
@@ -5815,15 +5802,7 @@ namespace Microsoft.Dafny
loopStmt.InferredDecreases = true;
}
if (loopStmt.InferredDecreases) {
- string s = "decreases ";
- if (theDecreases.Count != 0) {
- string sep = "";
- foreach (var d in theDecreases) {
- s += sep + Printer.ExprToString(d);
- sep = ", ";
- }
- }
- s += ";"; // always terminate with a semi-colon, even in the case of an empty decreases clause
+ string s = "decreases " + Util.Comma(", ", theDecreases, Printer.ExprToString);
reporter.Info(MessageSource.Resolver, loopStmt.Tok, s);
}
}
@@ -6002,8 +5981,7 @@ namespace Microsoft.Dafny
s.Bounds = new List<ComprehensionExpr.BoundedPool>();
foreach (var pair in allBounds) {
Contract.Assert(1 <= pair.Item2.Count);
- // TODO: The following could be improved by picking the bound that is most likely to give rise to an efficient compiled program
- s.Bounds.Add(pair.Item2[0]);
+ s.Bounds.Add(ComprehensionExpr.BoundedPool.GetBest(pair.Item2));
}
}
}
@@ -7899,7 +7877,7 @@ namespace Microsoft.Dafny
}
if (dtd != null && memberNamesUsed.Count != dtd.Ctors.Count) {
// We could complain about the syntactic omission of constructors:
- // Error(expr, "match expression does not cover all constructors");
+ // reporter.Error(MessageSource.Resolver, expr, "match expression does not cover all constructors");
// but instead we let the verifier do a semantic check.
// So, for now, record the missing constructors:
foreach (var ctr in dtd.Ctors) {
@@ -8447,7 +8425,7 @@ namespace Microsoft.Dafny
receiver = new StaticReceiverExpr(expr.tok, (ClassDecl)member.EnclosingClass);
} else {
if (!scope.AllowInstance) {
- Error(expr.tok, "'this' is not allowed in a 'static' context");
+ reporter.Error(MessageSource.Resolver, expr.tok, "'this' is not allowed in a 'static' context");
// nevertheless, set "receiver" to a value so we can continue resolution
}
receiver = new ImplicitThisExpr(expr.tok);
@@ -8473,7 +8451,7 @@ namespace Microsoft.Dafny
// ----- 3. static member of the enclosing module
Contract.Assert(member.IsStatic); // moduleInfo.StaticMembers is supposed to contain only static members of the module's implicit class _default
if (ReallyAmbiguousThing(ref member)) {
- Error(expr.tok, "The name {0} ambiguously refers to a static member in one of the modules {1} (try qualifying the member name with the module name)", expr.Name, ((AmbiguousMemberDecl)member).ModuleNames());
+ reporter.Error(MessageSource.Resolver, expr.tok, "The name {0} ambiguously refers to a static member in one of the modules {1} (try qualifying the member name with the module name)", expr.Name, ((AmbiguousMemberDecl)member).ModuleNames());
} else {
var receiver = new StaticReceiverExpr(expr.tok, (ClassDecl)member.EnclosingClass);
r = ResolveExprDotCall(expr.tok, receiver, member, expr.OptTypeArguments, opts.codeContext, allowMethodCall);
@@ -8749,7 +8727,7 @@ namespace Microsoft.Dafny
// ----- 1. static member of the specified module
Contract.Assert(member.IsStatic); // moduleInfo.StaticMembers is supposed to contain only static members of the module's implicit class _default
if (ReallyAmbiguousThing(ref member)) {
- Error(expr.tok, "The name {0} ambiguously refers to a static member in one of the modules {1} (try qualifying the member name with the module name)", expr.SuffixName, ((AmbiguousMemberDecl)member).ModuleNames());
+ reporter.Error(MessageSource.Resolver, expr.tok, "The name {0} ambiguously refers to a static member in one of the modules {1} (try qualifying the member name with the module name)", expr.SuffixName, ((AmbiguousMemberDecl)member).ModuleNames());
} else {
var receiver = new StaticReceiverExpr(expr.tok, (ClassDecl)member.EnclosingClass);
r = ResolveExprDotCall(expr.tok, receiver, member, expr.OptTypeArguments, opts.codeContext, allowMethodCall);
@@ -9375,136 +9353,148 @@ namespace Microsoft.Dafny
expr = polarity ? Expression.CreateAnd(c, expr) : Expression.CreateImplies(c, expr);
}
for (int j = 0; j < bvars.Count; j++) {
- var bv = bvars[j];
- var bounds = new List<ComprehensionExpr.BoundedPool>();
- if (bv.Type.IsBoolType) {
- // easy
- bounds.Add(new ComprehensionExpr.BoolBoundedPool());
- } else {
- bool foundBoundsForBv = false;
- if (bv.Type.IsIndDatatype && bv.Type.AsIndDatatype.HasFinitePossibleValues) {
- bounds.Add(new ComprehensionExpr.DatatypeBoundedPool(bv.Type.AsIndDatatype));
- foundBoundsForBv = true;
+ VT bv;
+ List<ComprehensionExpr.BoundedPool> bounds;
+ foundError = DiscoverAuxSingle<VT>(bvars, expr, polarity, returnAllBounds, true, allowAnyIntegers, missingBounds, foundError, j, out bv, out bounds);
+ if (!returnAllBounds && bounds.Count > 1) {
+ var best = ComprehensionExpr.BoundedPool.GetBest(bounds);
+ bounds = new List<ComprehensionExpr.BoundedPool>() { best };
+ }
+
+ allBounds.Add(new Tuple<VT, List<ComprehensionExpr.BoundedPool>>(bv, bounds));
+ }
+ return foundError ? null : allBounds;
+ }
+
+ private static bool DiscoverAuxSingle<VT>(List<VT> bvars, Expression expr, bool polarity, bool allowPartialBounds, bool returnAllBounds, bool allowAnyIntegers, List<VT> missingBounds, bool foundError, int j, out VT bv, out List<ComprehensionExpr.BoundedPool> bounds) where VT : IVariable {
+ bv = bvars[j];
+ bounds = new List<ComprehensionExpr.BoundedPool>();
+ if (bv.Type.IsBoolType) {
+ // easy
+ bounds.Add(new ComprehensionExpr.BoolBoundedPool());
+ } else {
+ bool foundBoundsForBv = false;
+ if (bv.Type.IsIndDatatype && bv.Type.AsIndDatatype.HasFinitePossibleValues) {
+ bounds.Add(new ComprehensionExpr.DatatypeBoundedPool(bv.Type.AsIndDatatype));
+ foundBoundsForBv = true;
+ }
+ // Go through the conjuncts of the range expression to look for bounds.
+ Expression lowerBound = null;
+ Expression upperBound = null;
+ if ((allowPartialBounds || returnAllBounds) && lowerBound != null) {
+ bounds.Add(new ComprehensionExpr.IntBoundedPool(lowerBound, upperBound));
+ lowerBound = null;
+ foundBoundsForBv = true;
+ }
+ foreach (var conjunct in NormalizedConjuncts(expr, polarity)) {
+ var c = conjunct as BinaryExpr;
+ if (c == null) {
+ goto CHECK_NEXT_CONJUNCT;
+ }
+ var e0 = c.E0;
+ var e1 = c.E1;
+ int whereIsBv = SanitizeForBoundDiscovery(bvars, j, c.ResolvedOp, ref e0, ref e1);
+ if (whereIsBv < 0) {
+ goto CHECK_NEXT_CONJUNCT;
+ }
+ switch (c.ResolvedOp) {
+ case BinaryExpr.ResolvedOpcode.InSet:
+ if (whereIsBv == 0) {
+ bounds.Add(new ComprehensionExpr.SetBoundedPool(e1));
+ foundBoundsForBv = true;
+ if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.Subset:
+ if (returnAllBounds) {
+ if (whereIsBv == 0) {
+ bounds.Add(new ComprehensionExpr.SubSetBoundedPool(e1));
+ } else {
+ bounds.Add(new ComprehensionExpr.SuperSetBoundedPool(e0));
+ }
+ foundBoundsForBv = true;
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.InMultiSet:
+ if (whereIsBv == 0) {
+ bounds.Add(new ComprehensionExpr.SetBoundedPool(e1));
+ foundBoundsForBv = true;
+ if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.InSeq:
+ if (whereIsBv == 0) {
+ bounds.Add(new ComprehensionExpr.SeqBoundedPool(e1));
+ foundBoundsForBv = true;
+ if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.InMap:
+ if (whereIsBv == 0 && e1.Type.AsMapType.Finite) {
+ bounds.Add(new ComprehensionExpr.MapBoundedPool(e1));
+ foundBoundsForBv = true;
+ if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.EqCommon:
+ if (bv.Type is IntType) {
+ var otherOperand = whereIsBv == 0 ? e1 : e0;
+ bounds.Add(new ComprehensionExpr.IntBoundedPool(otherOperand, Expression.CreateIncrement(otherOperand, 1)));
+ foundBoundsForBv = true;
+ if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
+ } else if (returnAllBounds && bv.Type is SetType) {
+ var otherOperand = whereIsBv == 0 ? e1 : e0;
+ bounds.Add(new ComprehensionExpr.SubSetBoundedPool(otherOperand));
+ foundBoundsForBv = true;
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.Gt:
+ case BinaryExpr.ResolvedOpcode.Ge:
+ Contract.Assert(false); throw new cce.UnreachableException(); // promised by postconditions of NormalizedConjunct
+ case BinaryExpr.ResolvedOpcode.Lt:
+ if (e0.Type.IsNumericBased(Type.NumericPersuation.Int)) {
+ if (whereIsBv == 0 && upperBound == null) {
+ upperBound = e1; // bv < E
+ } else if (whereIsBv == 1 && lowerBound == null) {
+ lowerBound = Expression.CreateIncrement(e0, 1); // E < bv
+ }
+ }
+ break;
+ case BinaryExpr.ResolvedOpcode.Le:
+ if (e0.Type.IsNumericBased(Type.NumericPersuation.Int)) {
+ if (whereIsBv == 0 && upperBound == null) {
+ upperBound = Expression.CreateIncrement(e1, 1); // bv <= E
+ } else if (whereIsBv == 1 && lowerBound == null) {
+ lowerBound = e0; // E <= bv
+ }
+ }
+ break;
+ default:
+ break;
}
- // Go through the conjuncts of the range expression to look for bounds.
- Expression lowerBound = null;
- Expression upperBound = null;
- if (returnAllBounds && lowerBound != null) {
+ if ((lowerBound != null && upperBound != null) ||
+ (allowPartialBounds && (lowerBound != null || upperBound != null))) {
+ // we have found two halves (or, in the case of returnAllBounds, we have found some bound)
bounds.Add(new ComprehensionExpr.IntBoundedPool(lowerBound, upperBound));
lowerBound = null;
+ upperBound = null;
foundBoundsForBv = true;
+ if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
}
- foreach (var conjunct in NormalizedConjuncts(expr, polarity)) {
- var c = conjunct as BinaryExpr;
- if (c == null) {
- goto CHECK_NEXT_CONJUNCT;
- }
- var e0 = c.E0;
- var e1 = c.E1;
- int whereIsBv = SanitizeForBoundDiscovery(bvars, j, c.ResolvedOp, ref e0, ref e1);
- if (whereIsBv < 0) {
- goto CHECK_NEXT_CONJUNCT;
- }
- switch (c.ResolvedOp) {
- case BinaryExpr.ResolvedOpcode.InSet:
- if (whereIsBv == 0) {
- bounds.Add(new ComprehensionExpr.SetBoundedPool(e1));
- foundBoundsForBv = true;
- if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
- }
- break;
- case BinaryExpr.ResolvedOpcode.Subset:
- if (returnAllBounds) {
- if (whereIsBv == 0) {
- bounds.Add(new ComprehensionExpr.SubSetBoundedPool(e1));
- } else {
- bounds.Add(new ComprehensionExpr.SuperSetBoundedPool(e0));
- }
- foundBoundsForBv = true;
- }
- break;
- case BinaryExpr.ResolvedOpcode.InMultiSet:
- if (whereIsBv == 0) {
- bounds.Add(new ComprehensionExpr.SetBoundedPool(e1));
- foundBoundsForBv = true;
- if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
- }
- break;
- case BinaryExpr.ResolvedOpcode.InSeq:
- if (whereIsBv == 0) {
- bounds.Add(new ComprehensionExpr.SeqBoundedPool(e1));
- foundBoundsForBv = true;
- if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
- }
- break;
- case BinaryExpr.ResolvedOpcode.InMap:
- if (whereIsBv == 0 && e1.Type.AsMapType.Finite) {
- bounds.Add(new ComprehensionExpr.MapBoundedPool(e1));
- foundBoundsForBv = true;
- if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
- }
- break;
- case BinaryExpr.ResolvedOpcode.EqCommon:
- if (bv.Type is IntType) {
- var otherOperand = whereIsBv == 0 ? e1 : e0;
- bounds.Add(new ComprehensionExpr.IntBoundedPool(otherOperand, Expression.CreateIncrement(otherOperand, 1)));
- foundBoundsForBv = true;
- if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
- } else if (returnAllBounds && bv.Type is SetType) {
- var otherOperand = whereIsBv == 0 ? e1 : e0;
- bounds.Add(new ComprehensionExpr.SubSetBoundedPool(otherOperand));
- foundBoundsForBv = true;
- }
- break;
- case BinaryExpr.ResolvedOpcode.Gt:
- case BinaryExpr.ResolvedOpcode.Ge:
- Contract.Assert(false); throw new cce.UnreachableException(); // promised by postconditions of NormalizedConjunct
- case BinaryExpr.ResolvedOpcode.Lt:
- if (e0.Type.IsNumericBased(Type.NumericPersuation.Int)) {
- if (whereIsBv == 0 && upperBound == null) {
- upperBound = e1; // bv < E
- } else if (whereIsBv == 1 && lowerBound == null) {
- lowerBound = Expression.CreateIncrement(e0, 1); // E < bv
- }
- }
- break;
- case BinaryExpr.ResolvedOpcode.Le:
- if (e0.Type.IsNumericBased(Type.NumericPersuation.Int)) {
- if (whereIsBv == 0 && upperBound == null) {
- upperBound = Expression.CreateIncrement(e1, 1); // bv <= E
- } else if (whereIsBv == 1 && lowerBound == null) {
- lowerBound = e0; // E <= bv
- }
- }
- break;
- default:
- break;
- }
- if ((lowerBound != null && upperBound != null) ||
- (returnAllBounds && (lowerBound != null || upperBound != null))) {
- // we have found two halves (or, in the case of returnAllBounds, we have found some bound)
- bounds.Add(new ComprehensionExpr.IntBoundedPool(lowerBound, upperBound));
- lowerBound = null;
- upperBound = null;
- foundBoundsForBv = true;
- if (!returnAllBounds) goto CHECK_NEXT_BOUND_VARIABLE;
- }
- CHECK_NEXT_CONJUNCT: ;
- }
- if (!foundBoundsForBv) {
- // we have checked every conjunct in the range expression and still have not discovered good bounds
- if (allowAnyIntegers && bv.Type is IntType) {
- bounds.Add(new AssignSuchThatStmt.WiggleWaggleBound());
- } else {
- missingBounds.Add(bv); // record failing bound variable
- foundError = true;
- }
+ CHECK_NEXT_CONJUNCT: ;
+ }
+ if (!foundBoundsForBv) {
+ // we have checked every conjunct in the range expression and still have not discovered good bounds
+ if (allowAnyIntegers && bv.Type is IntType) {
+ bounds.Add(new AssignSuchThatStmt.WiggleWaggleBound());
+ } else {
+ missingBounds.Add(bv); // record failing bound variable
+ foundError = true;
}
}
- CHECK_NEXT_BOUND_VARIABLE: ; // should goto here only if the bound for the current variable has been discovered (otherwise, return with null from this method)
- allBounds.Add(new Tuple<VT, List<ComprehensionExpr.BoundedPool>>(bv, bounds));
}
- return foundError ? null : allBounds;
+ CHECK_NEXT_BOUND_VARIABLE: ; // should goto here only if the bound for the current variable has been discovered (otherwise, return with null from this method)
+ return foundError;
}
static Expression TypeConstraint(IVariable bv, Type ty, ErrorReporter reporter) {
@@ -9777,7 +9767,6 @@ namespace Microsoft.Dafny
if (expr is IdentifierExpr) {
var e = (IdentifierExpr)expr;
-
return new HashSet<IVariable>() { e.Var };
} else if (expr is QuantifierExpr) {
@@ -10378,7 +10367,7 @@ namespace Microsoft.Dafny
// this call is disqualified from being a co-call, because it has a postcondition
// (a postcondition could be allowed, as long as it does not get to be used with
// co-recursive calls, because that could be unsound; for example, consider
- // "ensures false;")
+ // "ensures false")
if (!dealsWithCodatatypes) {
e.CoCall = FunctionCallExpr.CoCallResolution.No;
} else {
diff --git a/Source/Dafny/Rewriter.cs b/Source/Dafny/Rewriter.cs
index b6409b96..83f49a12 100644
--- a/Source/Dafny/Rewriter.cs
+++ b/Source/Dafny/Rewriter.cs
@@ -1290,6 +1290,347 @@ namespace Microsoft.Dafny
return new AutoGeneratedToken(tok);
}
}
+
+ // ===========================================================================================
+
+ public class InductionRewriter : IRewriter {
+ internal InductionRewriter(ErrorReporter reporter) : base(reporter) {
+ Contract.Requires(reporter != null);
+ }
+
+ internal override void PostCyclicityResolve(ModuleDefinition m) {
+ if (DafnyOptions.O.Induction == 0) {
+ // Don't bother inferring :induction attributes. This will also have the effect of not warning about malformed :induction attributes
+ } else {
+ foreach (var decl in m.TopLevelDecls) {
+ if (decl is ClassDecl) {
+ var cl = (ClassDecl)decl;
+ foreach (var member in cl.Members) {
+ if (member is FixpointLemma) {
+ var method = (FixpointLemma)member;
+ ProcessMethodExpressions(method);
+ ComputeLemmaInduction(method.PrefixLemma);
+ ProcessMethodExpressions(method.PrefixLemma);
+ } else if (member is Method) {
+ var method = (Method)member;
+ ComputeLemmaInduction(method);
+ ProcessMethodExpressions(method);
+ } else if (member is FixpointPredicate) {
+ var function = (FixpointPredicate)member;
+ ProcessFunctionExpressions(function);
+ ProcessFunctionExpressions(function.PrefixPredicate);
+ } else if (member is Function) {
+ var function = (Function)member;
+ ProcessFunctionExpressions(function);
+ }
+ }
+ } else if (decl is NewtypeDecl) {
+ var nt = (NewtypeDecl)decl;
+ if (nt.Constraint != null) {
+ var visitor = new Induction_Visitor(this);
+ visitor.Visit(nt.Constraint);
+ }
+ }
+ }
+ }
+ }
+
+ void ProcessMethodExpressions(Method method) {
+ Contract.Requires(method != null);
+ var visitor = new Induction_Visitor(this);
+ method.Req.ForEach(mfe => visitor.Visit(mfe.E));
+ method.Ens.ForEach(mfe => visitor.Visit(mfe.E));
+ if (method.Body != null) {
+ visitor.Visit(method.Body);
+ }
+ }
+
+ void ProcessFunctionExpressions(Function function) {
+ Contract.Requires(function != null);
+ var visitor = new Induction_Visitor(this);
+ function.Req.ForEach(visitor.Visit);
+ function.Ens.ForEach(visitor.Visit);
+ if (function.Body != null) {
+ visitor.Visit(function.Body);
+ }
+ }
+
+ void ComputeLemmaInduction(Method method) {
+ Contract.Requires(method != null);
+ if (method.Body != null && method.IsGhost && method.Mod.Expressions.Count == 0 && method.Outs.Count == 0 && !(method is FixpointLemma)) {
+ var specs = new List<Expression>();
+ method.Req.ForEach(mfe => specs.Add(mfe.E));
+ method.Ens.ForEach(mfe => specs.Add(mfe.E));
+ ComputeInductionVariables(method.tok, method.Ins, specs, method, ref method.Attributes);
+ }
+ }
+
+ void ComputeInductionVariables<VarType>(IToken tok, List<VarType> boundVars, List<Expression> searchExprs, Method lemma, ref Attributes attributes) where VarType : class, IVariable {
+ Contract.Requires(tok != null);
+ Contract.Requires(boundVars != null);
+ Contract.Requires(searchExprs != null);
+ Contract.Requires(DafnyOptions.O.Induction != 0);
+
+ var args = Attributes.FindExpressions(attributes, "induction"); // we only look at the first one we find, since it overrides any other ones
+ if (args == null) {
+ if (DafnyOptions.O.Induction < 2) {
+ // No explicit induction variables and we're asked not to infer anything, so we're done
+ return;
+ } else if (DafnyOptions.O.Induction == 2 && lemma != null) {
+ // We're asked to infer induction variables only for quantifiers, not for lemmas
+ return;
+ }
+ // GO INFER below (only select boundVars)
+ } else if (args.Count == 0) {
+ // {:induction} is treated the same as {:induction true}, which says to automatically infer induction variables
+ // GO INFER below (all boundVars)
+ } else if (args.Count == 1 && args[0] is LiteralExpr && ((LiteralExpr)args[0]).Value is bool) {
+ // {:induction false} or {:induction true}
+ if (!(bool)((LiteralExpr)args[0]).Value) {
+ // we're told not to infer anything
+ return;
+ }
+ // GO INFER below (all boundVars)
+ } else {
+ // Here, we're expecting the arguments to {:induction args} to be a sublist of "boundVars".
+ var goodArguments = new List<Expression>();
+ var i = 0;
+ foreach (var arg in args) {
+ var ie = arg.Resolved as IdentifierExpr;
+ if (ie != null) {
+ var j = boundVars.FindIndex(i, v => v == ie.Var);
+ if (i <= j) {
+ goodArguments.Add(ie);
+ i = j;
+ continue;
+ }
+ if (0 <= boundVars.FindIndex(v => v == ie.Var)) {
+ reporter.Warning(MessageSource.Rewriter, arg.tok, "{0}s given as :induction arguments must be given in the same order as in the {1}; ignoring attribute",
+ lemma != null ? "lemma parameter" : "bound variable", lemma != null ? "lemma" : "quantifier");
+ return;
+ }
+ }
+ reporter.Warning(MessageSource.Rewriter, arg.tok, "invalid :induction attribute argument; expected {0}{1}; ignoring attribute",
+ i == 0 ? "'false' or 'true' or " : "",
+ lemma != null ? "lemma parameter" : "bound variable");
+ return;
+ }
+ // The argument list was legal, so let's use it for the _induction attribute
+ attributes = new Attributes("_induction", goodArguments, attributes);
+ return;
+ }
+
+ // Okay, here we go, coming up with good induction setting for the given situation
+ var inductionVariables = new List<Expression>();
+ foreach (IVariable n in boundVars) {
+ if (!n.Type.IsTypeParameter && (args != null || searchExprs.Exists(expr => VarOccursInArgumentToRecursiveFunction(expr, n)))) {
+ inductionVariables.Add(new IdentifierExpr(n));
+ }
+ }
+ if (inductionVariables.Count != 0) {
+ // We found something usable, so let's record that in an attribute
+ attributes = new Attributes("_induction", inductionVariables, attributes);
+ // And since we're inferring something, let's also report that in a hover text.
+ var s = Printer.OneAttributeToString(attributes, "induction");
+ if (lemma is PrefixLemma) {
+ s = lemma.Name + " " + s;
+ }
+ reporter.Info(MessageSource.Rewriter, tok, s);
+ }
+ }
+ class Induction_Visitor : BottomUpVisitor
+ {
+ readonly InductionRewriter IndRewriter;
+ public Induction_Visitor(InductionRewriter inductionRewriter) {
+ Contract.Requires(inductionRewriter != null);
+ IndRewriter = inductionRewriter;
+ }
+ protected override void VisitOneExpr(Expression expr) {
+ var q = expr as QuantifierExpr;
+ if (q != null && q.SplitQuantifier == null) {
+ IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, new List<Expression>() { q.LogicalBody() }, null, ref q.Attributes);
+ }
+ }
+ void VisitInductionStmt(Statement stmt) {
+ Contract.Requires(stmt != null);
+ // visit a selection of subexpressions
+ if (stmt is AssertStmt) {
+ var s = (AssertStmt)stmt;
+ Visit(s.Expr);
+ }
+ // recursively visit all substatements
+ foreach (var s in stmt.SubStatements) {
+ VisitInductionStmt(s);
+ }
+ }
+ }
+
+ /// <summary>
+ /// Returns 'true' iff by looking at 'expr' the Induction Heuristic determines that induction should be applied to 'n'.
+ /// More precisely:
+ /// DafnyInductionHeuristic Return 'true'
+ /// ----------------------- -------------
+ /// 0 always
+ /// 1 if 'n' occurs as any subexpression (of 'expr')
+ /// 2 if 'n' occurs as any subexpression of any index argument of an array/sequence select expression or any argument to a recursive function
+ /// 3 if 'n' occurs as a prominent subexpression of any index argument of an array/sequence select expression or any argument to a recursive function
+ /// 4 if 'n' occurs as any subexpression of any argument to a recursive function
+ /// 5 if 'n' occurs as a prominent subexpression of any argument to a recursive function
+ /// 6 if 'n' occurs as a prominent subexpression of any decreases-influencing argument to a recursive function
+ /// Parameter 'n' is allowed to be a ThisSurrogate.
+ /// </summary>
+ public static bool VarOccursInArgumentToRecursiveFunction(Expression expr, IVariable n) {
+ switch (DafnyOptions.O.InductionHeuristic) {
+ case 0: return true;
+ case 1: return Translator.ContainsFreeVariable(expr, false, n);
+ default: return VarOccursInArgumentToRecursiveFunction(expr, n, false);
+ }
+ }
+
+ /// <summary>
+ /// Worker routine for VarOccursInArgumentToRecursiveFunction(expr,n), where the additional parameter 'exprIsProminent' says whether or
+ /// not 'expr' has prominent status in its context.
+ /// DafnyInductionHeuristic cases 0 and 1 are assumed to be handled elsewhere (i.e., a precondition of this method is DafnyInductionHeuristic is at least 2).
+ /// Parameter 'n' is allowed to be a ThisSurrogate.
+ /// </summary>
+ static bool VarOccursInArgumentToRecursiveFunction(Expression expr, IVariable n, bool exprIsProminent) {
+ Contract.Requires(expr != null);
+ Contract.Requires(n != null);
+
+ // The following variable is what gets passed down to recursive calls if the subexpression does not itself acquire prominent status.
+ var subExprIsProminent = DafnyOptions.O.InductionHeuristic == 2 || DafnyOptions.O.InductionHeuristic == 4 ? /*once prominent, always prominent*/exprIsProminent : /*reset the prominent status*/false;
+
+ if (expr is IdentifierExpr) {
+ var e = (IdentifierExpr)expr;
+ return exprIsProminent && e.Var == n;
+ } else if (expr is SeqSelectExpr) {
+ var e = (SeqSelectExpr)expr;
+ var q = DafnyOptions.O.InductionHeuristic < 4 || subExprIsProminent;
+ return VarOccursInArgumentToRecursiveFunction(e.Seq, n, subExprIsProminent) || // this subexpression does not acquire "prominent" status
+ (e.E0 != null && VarOccursInArgumentToRecursiveFunction(e.E0, n, q)) || // this one does (unless arrays/sequences are excluded)
+ (e.E1 != null && VarOccursInArgumentToRecursiveFunction(e.E1, n, q)); // ditto
+ } else if (expr is MultiSelectExpr) {
+ var e = (MultiSelectExpr)expr;
+ var q = DafnyOptions.O.InductionHeuristic < 4 || subExprIsProminent;
+ return VarOccursInArgumentToRecursiveFunction(e.Array, n, subExprIsProminent) ||
+ e.Indices.Exists(exp => VarOccursInArgumentToRecursiveFunction(exp, n, q));
+ } else if (expr is FunctionCallExpr) {
+ var e = (FunctionCallExpr)expr;
+ // For recursive functions: arguments are "prominent"
+ // For non-recursive function: arguments are "prominent" if the call is
+ var rec = e.Function.IsRecursive && e.CoCall != FunctionCallExpr.CoCallResolution.Yes;
+ var decr = e.Function.Decreases.Expressions;
+ bool variantArgument;
+ if (DafnyOptions.O.InductionHeuristic < 6) {
+ variantArgument = rec;
+ } else {
+ // The receiver is considered to be "variant" if the function is recursive and the receiver participates
+ // in the effective decreases clause of the function. The receiver participates if it's a free variable
+ // of a term in the explicit decreases clause.
+ variantArgument = rec && decr.Exists(ee => Translator.ContainsFreeVariable(ee, true, null));
+ }
+ if (VarOccursInArgumentToRecursiveFunction(e.Receiver, n, variantArgument || subExprIsProminent)) {
+ return true;
+ }
+ Contract.Assert(e.Function.Formals.Count == e.Args.Count);
+ for (int i = 0; i < e.Function.Formals.Count; i++) {
+ var f = e.Function.Formals[i];
+ var exp = e.Args[i];
+ if (DafnyOptions.O.InductionHeuristic < 6) {
+ variantArgument = rec;
+ } else if (rec) {
+ // The argument position is considered to be "variant" if the function is recursive and...
+ // ... it has something to do with why the callee is well-founded, which happens when...
+ if (f is ImplicitFormal) {
+ // ... the argument is the implicit _k parameter, which is always first in the effective decreases clause of a prefix lemma, or
+ variantArgument = true;
+ } else if (decr.Exists(ee => Translator.ContainsFreeVariable(ee, false, f))) {
+ // ... it participates in the effective decreases clause of the function, which happens when it is
+ // a free variable of a term in the explicit decreases clause, or
+ variantArgument = true;
+ } else {
+ // ... the callee is a prefix predicate.
+ variantArgument = true;
+ }
+ }
+ if (VarOccursInArgumentToRecursiveFunction(exp, n, variantArgument || subExprIsProminent)) {
+ return true;
+ }
+ }
+ return false;
+ } else if (expr is TernaryExpr) {
+ var e = (TernaryExpr)expr;
+ switch (e.Op) {
+ case TernaryExpr.Opcode.PrefixEqOp:
+ case TernaryExpr.Opcode.PrefixNeqOp:
+ return VarOccursInArgumentToRecursiveFunction(e.E0, n, true) ||
+ VarOccursInArgumentToRecursiveFunction(e.E1, n, subExprIsProminent) ||
+ VarOccursInArgumentToRecursiveFunction(e.E2, n, subExprIsProminent);
+ default:
+ Contract.Assert(false); throw new cce.UnreachableException(); // unexpected ternary expression
+ }
+ } else if (expr is DatatypeValue) {
+ var e = (DatatypeValue)expr;
+ var q = n.Type.IsDatatype ? exprIsProminent : subExprIsProminent; // prominent status continues, if we're looking for a variable whose type is a datatype
+ return e.Arguments.Exists(exp => VarOccursInArgumentToRecursiveFunction(exp, n, q));
+ } else if (expr is UnaryExpr) {
+ var e = (UnaryExpr)expr;
+ // both Not and SeqLength preserve prominence
+ return VarOccursInArgumentToRecursiveFunction(e.E, n, exprIsProminent);
+ } else if (expr is BinaryExpr) {
+ var e = (BinaryExpr)expr;
+ bool q;
+ switch (e.ResolvedOp) {
+ case BinaryExpr.ResolvedOpcode.Add:
+ case BinaryExpr.ResolvedOpcode.Sub:
+ case BinaryExpr.ResolvedOpcode.Mul:
+ case BinaryExpr.ResolvedOpcode.Div:
+ case BinaryExpr.ResolvedOpcode.Mod:
+ case BinaryExpr.ResolvedOpcode.Union:
+ case BinaryExpr.ResolvedOpcode.Intersection:
+ case BinaryExpr.ResolvedOpcode.SetDifference:
+ case BinaryExpr.ResolvedOpcode.Concat:
+ // these operators preserve prominence
+ q = exprIsProminent;
+ break;
+ default:
+ // whereas all other binary operators do not
+ q = subExprIsProminent;
+ break;
+ }
+ return VarOccursInArgumentToRecursiveFunction(e.E0, n, q) ||
+ VarOccursInArgumentToRecursiveFunction(e.E1, n, q);
+ } else if (expr is StmtExpr) {
+ var e = (StmtExpr)expr;
+ // ignore the statement
+ return VarOccursInArgumentToRecursiveFunction(e.E, n);
+
+ } else if (expr is ITEExpr) {
+ var e = (ITEExpr)expr;
+ return VarOccursInArgumentToRecursiveFunction(e.Test, n, subExprIsProminent) || // test is not "prominent"
+ VarOccursInArgumentToRecursiveFunction(e.Thn, n, exprIsProminent) || // but the two branches are
+ VarOccursInArgumentToRecursiveFunction(e.Els, n, exprIsProminent);
+ } else if (expr is OldExpr ||
+ expr is ConcreteSyntaxExpression ||
+ expr is BoxingCastExpr ||
+ expr is UnboxingCastExpr) {
+ foreach (var exp in expr.SubExpressions) {
+ if (VarOccursInArgumentToRecursiveFunction(exp, n, exprIsProminent)) { // maintain prominence
+ return true;
+ }
+ }
+ return false;
+ } else {
+ // in all other cases, reset the prominence status and recurse on the subexpressions
+ foreach (var exp in expr.SubExpressions) {
+ if (VarOccursInArgumentToRecursiveFunction(exp, n, subExprIsProminent)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+ }
}
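
The new InductionRewriter moves the lemma and quantifier induction heuristics out of the
translator, records the chosen variables in a generated :_induction attribute, and reports
the choice as hover text via Printer.OneAttributeToString. A hedged, self-contained Dafny
example of the kind of lemma it applies to (Sum and SumFormula are invented for illustration;
with the default /induction:3, the rewriter would select n because n feeds an argument of the
recursive function Sum in the postcondition):

  function Sum(n: nat): nat
  {
    if n == 0 then 0 else n + Sum(n - 1)
  }

  lemma SumFormula(n: nat)
    ensures 2 * Sum(n) == n * (n + 1)
  {
    // no explicit proof: the rewriter infers induction on n, as if {:induction n} were written
  }
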
diff --git a/Source/Dafny/Translator.cs b/Source/Dafny/Translator.cs
index 50324002..23c0c3f6 100644
--- a/Source/Dafny/Translator.cs
+++ b/Source/Dafny/Translator.cs
@@ -93,12 +93,10 @@ namespace Microsoft.Dafny {
}
public class Translator {
- readonly ErrorReporter reporter;
+ ErrorReporter reporter;
[NotDelayed]
public Translator(ErrorReporter reporter) {
- Contract.Requires(reporter != null);
-
this.reporter = reporter;
InsertChecksums = 0 < CommandLineOptions.Clo.VerifySnapshots;
Bpl.Program boogieProgram = ReadPrelude();
@@ -2760,126 +2758,100 @@ namespace Microsoft.Dafny {
Bpl.StmtList stmts;
if (!wellformednessProc) {
- if (3 <= DafnyOptions.O.Induction && m.IsGhost && m.Mod.Expressions.Count == 0 && m.Outs.Count == 0 && !(m is FixpointLemma)) {
- var posts = new List<Expression>();
- m.Ens.ForEach(mfe => posts.Add(mfe.E));
- var allIns = new List<Formal>();
- if (!m.IsStatic) {
- allIns.Add(new ThisSurrogate(m.tok, Resolver.GetThisType(m.tok, (ClassDecl)m.EnclosingClass)));
- }
- allIns.AddRange(m.Ins);
- var inductionVars = ApplyInduction(allIns, m.Attributes, posts, delegate(System.IO.TextWriter wr) { wr.Write(m.FullName); });
- if (inductionVars.Count != 0) {
- // Let the parameters be this,x,y of the method M and suppose ApplyInduction returns this,y.
- // Also, let Pre be the precondition and VF be the decreases clause.
- // Then, insert into the method body what amounts to:
- // assume case-analysis-on-parameter[[ y' ]];
- // forall (this', y' | Pre(this', x, y') && VF(this', x, y') << VF(this, x, y)) {
- // this'.M(x, y');
- // }
- // Generate bound variables for the forall statement, and a substitution for the Pre and VF
-
- // assume case-analysis-on-parameter[[ y' ]];
- foreach (var inFormal in m.Ins) {
- var dt = inFormal.Type.AsDatatype;
- if (dt != null) {
- var funcID = new Bpl.FunctionCall(new Bpl.IdentifierExpr(inFormal.tok, "$IsA#" + dt.FullSanitizedName, Bpl.Type.Bool));
- var f = new Bpl.IdentifierExpr(inFormal.tok, inFormal.AssignUniqueName(m.IdGenerator), TrType(inFormal.Type));
- builder.Add(new Bpl.AssumeCmd(inFormal.tok, new Bpl.NAryExpr(inFormal.tok, funcID, new List<Bpl.Expr> { f })));
- }
- }
-
- var parBoundVars = new List<BoundVar>();
- Expression receiverReplacement = null;
- var substMap = new Dictionary<IVariable, Expression>();
- foreach (var iv in inductionVars) {
- BoundVar bv;
- IdentifierExpr ie;
- CloneVariableAsBoundVar(iv.tok, iv, "$ih#" + iv.Name, out bv, out ie);
- parBoundVars.Add(bv);
- if (iv is ThisSurrogate) {
- Contract.Assert(receiverReplacement == null && substMap.Count == 0); // the receiver comes first, if at all
- receiverReplacement = ie;
- } else {
- substMap.Add(iv, ie);
- }
- }
+ var inductionVars = ApplyInduction(m.Ins, m.Attributes);
+ if (inductionVars.Count != 0) {
+ // Let the parameters be this,x,y of the method M and suppose ApplyInduction returns y.
+ // Also, let Pre be the precondition and VF be the decreases clause.
+ // Then, insert into the method body what amounts to:
+ // assume case-analysis-on-parameter[[ y' ]];
+ // forall (y' | Pre(this, x, y') && VF(this, x, y') << VF(this, x, y)) {
+ // this.M(x, y');
+ // }
+ // Generate bound variables for the forall statement, and a substitution for the Pre and VF
+
+ // assume case-analysis-on-parameter[[ y' ]];
+ foreach (var inFormal in m.Ins) {
+ var dt = inFormal.Type.AsDatatype;
+ if (dt != null) {
+ var funcID = new Bpl.FunctionCall(new Bpl.IdentifierExpr(inFormal.tok, "$IsA#" + dt.FullSanitizedName, Bpl.Type.Bool));
+ var f = new Bpl.IdentifierExpr(inFormal.tok, inFormal.AssignUniqueName(m.IdGenerator), TrType(inFormal.Type));
+ builder.Add(new Bpl.AssumeCmd(inFormal.tok, new Bpl.NAryExpr(inFormal.tok, funcID, new List<Bpl.Expr> { f })));
+ }
+ }
+
+ var parBoundVars = new List<BoundVar>();
+ var substMap = new Dictionary<IVariable, Expression>();
+ foreach (var iv in inductionVars) {
+ BoundVar bv;
+ IdentifierExpr ie;
+ CloneVariableAsBoundVar(iv.tok, iv, "$ih#" + iv.Name, out bv, out ie);
+ parBoundVars.Add(bv);
+ substMap.Add(iv, ie);
+ }
- // Generate a CallStmt for the recursive call
- Expression recursiveCallReceiver;
- if (receiverReplacement != null) {
- recursiveCallReceiver = receiverReplacement;
- } else if (m.IsStatic) {
- recursiveCallReceiver = new StaticReceiverExpr(m.tok, (ClassDecl)m.EnclosingClass, true); // this also resolves it
+ // Generate a CallStmt for the recursive call
+ Expression recursiveCallReceiver;
+ if (m.IsStatic) {
+ recursiveCallReceiver = new StaticReceiverExpr(m.tok, (ClassDecl)m.EnclosingClass, true); // this also resolves it
+ } else {
+ recursiveCallReceiver = new ImplicitThisExpr(m.tok);
+ recursiveCallReceiver.Type = Resolver.GetThisType(m.tok, (ClassDecl)m.EnclosingClass); // resolve here
+ }
+ var recursiveCallArgs = new List<Expression>();
+ foreach (var inFormal in m.Ins) {
+ Expression inE;
+ if (substMap.TryGetValue(inFormal, out inE)) {
+ recursiveCallArgs.Add(inE);
} else {
- recursiveCallReceiver = new ImplicitThisExpr(m.tok);
- recursiveCallReceiver.Type = Resolver.GetThisType(m.tok, (ClassDecl)m.EnclosingClass); // resolve here
- }
- var recursiveCallArgs = new List<Expression>();
- foreach (var inFormal in m.Ins) {
- Expression inE;
- if (substMap.TryGetValue(inFormal, out inE)) {
- recursiveCallArgs.Add(inE);
- } else {
- var ie = new IdentifierExpr(inFormal.tok, inFormal.Name);
- ie.Var = inFormal; // resolve here
- ie.Type = inFormal.Type; // resolve here
- recursiveCallArgs.Add(ie);
- }
- }
- var methodSel = new MemberSelectExpr(m.tok, recursiveCallReceiver, m.Name);
- methodSel.Member = m; // resolve here
- methodSel.TypeApplication = new List<Type>();
- methodSel.TypeApplication.AddRange(recursiveCallReceiver.Type.TypeArgs);
- m.TypeArgs.ForEach(tp => methodSel.TypeApplication.Add(new UserDefinedType(tp)));
- methodSel.Type = new InferredTypeProxy(); // this is the last step in resolving 'methodSel'
- var recursiveCall = new CallStmt(m.tok, m.tok, new List<Expression>(), methodSel, recursiveCallArgs);
- recursiveCall.IsGhost = m.IsGhost; // resolve here
-
- Expression parRange = new LiteralExpr(m.tok, true);
- parRange.Type = Type.Bool; // resolve here
- if (receiverReplacement != null) {
- // add "this' != null" to the range
- var nil = new LiteralExpr(receiverReplacement.tok);
- nil.Type = receiverReplacement.Type; // resolve here
- var neqNull = new BinaryExpr(receiverReplacement.tok, BinaryExpr.Opcode.Neq, receiverReplacement, nil);
- neqNull.ResolvedOp = BinaryExpr.ResolvedOpcode.NeqCommon; // resolve here
- neqNull.Type = Type.Bool; // resolve here
- parRange = Expression.CreateAnd(parRange, neqNull);
- }
- foreach (var pre in m.Req) {
- if (!pre.IsFree) {
- parRange = Expression.CreateAnd(parRange, Substitute(pre.E, receiverReplacement, substMap));
- }
- }
- // construct an expression (generator) for: VF' << VF
- ExpressionConverter decrCheck = delegate(Dictionary<IVariable, Expression> decrSubstMap, ExpressionTranslator exprTran) {
- var decrToks = new List<IToken>();
- var decrTypes = new List<Type>();
- var decrCallee = new List<Expr>();
- var decrCaller = new List<Expr>();
- foreach (var ee in m.Decreases.Expressions) {
- decrToks.Add(ee.tok);
- decrTypes.Add(ee.Type.NormalizeExpand());
- decrCaller.Add(exprTran.TrExpr(ee));
- Expression es = Substitute(ee, receiverReplacement, substMap);
- es = Substitute(es, null, decrSubstMap);
- decrCallee.Add(exprTran.TrExpr(es));
- }
- return DecreasesCheck(decrToks, decrTypes, decrTypes, decrCallee, decrCaller, null, null, false, true);
- };
+ var ie = new IdentifierExpr(inFormal.tok, inFormal.Name);
+ ie.Var = inFormal; // resolve here
+ ie.Type = inFormal.Type; // resolve here
+ recursiveCallArgs.Add(ie);
+ }
+ }
+ var methodSel = new MemberSelectExpr(m.tok, recursiveCallReceiver, m.Name);
+ methodSel.Member = m; // resolve here
+ methodSel.TypeApplication = new List<Type>();
+ methodSel.TypeApplication.AddRange(recursiveCallReceiver.Type.TypeArgs);
+ m.TypeArgs.ForEach(tp => methodSel.TypeApplication.Add(new UserDefinedType(tp)));
+ methodSel.Type = new InferredTypeProxy(); // this is the last step in resolving 'methodSel'
+ var recursiveCall = new CallStmt(m.tok, m.tok, new List<Expression>(), methodSel, recursiveCallArgs);
+ recursiveCall.IsGhost = m.IsGhost; // resolve here
+
+ Expression parRange = new LiteralExpr(m.tok, true);
+ parRange.Type = Type.Bool; // resolve here
+ foreach (var pre in m.Req) {
+ if (!pre.IsFree) {
+ parRange = Expression.CreateAnd(parRange, Substitute(pre.E, null, substMap));
+ }
+ }
+ // construct an expression (generator) for: VF' << VF
+ ExpressionConverter decrCheck = delegate(Dictionary<IVariable, Expression> decrSubstMap, ExpressionTranslator exprTran) {
+ var decrToks = new List<IToken>();
+ var decrTypes = new List<Type>();
+ var decrCallee = new List<Expr>();
+ var decrCaller = new List<Expr>();
+ foreach (var ee in m.Decreases.Expressions) {
+ decrToks.Add(ee.tok);
+ decrTypes.Add(ee.Type.NormalizeExpand());
+ decrCaller.Add(exprTran.TrExpr(ee));
+ Expression es = Substitute(ee, null, substMap);
+ es = Substitute(es, null, decrSubstMap);
+ decrCallee.Add(exprTran.TrExpr(es));
+ }
+ return DecreasesCheck(decrToks, decrTypes, decrTypes, decrCallee, decrCaller, null, null, false, true);
+ };
#if VERIFY_CORRECTNESS_OF_TRANSLATION_FORALL_STATEMENT_RANGE
- var definedness = new Bpl.StmtListBuilder();
- var exporter = new Bpl.StmtListBuilder();
- TrForallStmtCall(m.tok, parBoundVars, parRange, decrCheck, recursiveCall, definedness, exporter, localVariables, etran);
- // All done, so put the two pieces together
- builder.Add(new Bpl.IfCmd(m.tok, null, definedness.Collect(m.tok), null, exporter.Collect(m.tok)));
+ var definedness = new Bpl.StmtListBuilder();
+ var exporter = new Bpl.StmtListBuilder();
+ TrForallStmtCall(m.tok, parBoundVars, parRange, decrCheck, recursiveCall, definedness, exporter, localVariables, etran);
+ // All done, so put the two pieces together
+ builder.Add(new Bpl.IfCmd(m.tok, null, definedness.Collect(m.tok), null, exporter.Collect(m.tok)));
#else
- TrForallStmtCall(m.tok, parBoundVars, parRange, decrCheck, recursiveCall, null, builder, localVariables, etran);
+ TrForallStmtCall(m.tok, parBoundVars, parRange, decrCheck, recursiveCall, null, builder, localVariables, etran);
#endif
- }
}
// translate the body of the method
Contract.Assert(m.Body != null); // follows from method precondition and the if guard
@@ -11554,58 +11526,56 @@ namespace Microsoft.Dafny {
return TrExpr(((NamedExpr)expr).Body);
} else if (expr is QuantifierExpr) {
QuantifierExpr e = (QuantifierExpr)expr;
+ List<Variable> tyvars = translator.MkTyParamBinders(e.TypeArgs);
+ List<Variable> bvars = new List<Variable>();
- if (e.SplitQuantifier != null) {
- return TrExpr(e.SplitQuantifierExpression);
- } else {
- List<Variable> tyvars = translator.MkTyParamBinders(e.TypeArgs);
- List<Variable> bvars = new List<Variable>();
-
- var initEtran = this;
- var bodyEtran = this;
- bool _scratch = true;
-
- Bpl.Expr antecedent = Bpl.Expr.True;
-
- if (Attributes.ContainsBool(e.Attributes, "layerQuantifier", ref _scratch)) {
- // If this is a layer quantifier, quantify over layers here, and use $LS(ly) layers in the translation of the body
- var ly = BplBoundVar(e.Refresh("q$ly#", translator.CurrentIdGenerator), predef.LayerType, bvars);
- bodyEtran = new ExpressionTranslator(translator, predef, HeapExpr, This, applyLimited_CurrentFunction, new FuelSetting(translator, 1, ly), new FuelSetting(translator, 1, ly), modifiesFrame, stripLits);
- }
- if (Attributes.ContainsBool(e.Attributes, "heapQuantifier", ref _scratch)) {
- var h = BplBoundVar(e.Refresh("q$heap#", translator.CurrentIdGenerator), predef.HeapType, bvars);
- bodyEtran = new ExpressionTranslator(bodyEtran, h);
- antecedent = BplAnd(new List<Bpl.Expr> {
- antecedent,
- translator.FunctionCall(e.tok, BuiltinFunction.IsGoodHeap, null, h),
- translator.HeapSameOrSucc(initEtran.HeapExpr, h)
- });
- }
+ var initEtran = this;
+ var bodyEtran = this;
+ bool _scratch = true;
- antecedent = BplAnd(antecedent, bodyEtran.TrBoundVariables(e.BoundVars, bvars));
+ Bpl.Expr antecedent = Bpl.Expr.True;
- Bpl.QKeyValue kv = TrAttributes(e.Attributes, "trigger");
- Bpl.Trigger tr = null;
- var argsEtran = bodyEtran.WithNoLits();
- foreach (var aa in e.Attributes.AsEnumerable().Where(aa => aa.Name == "trigger")) {
+ if (Attributes.ContainsBool(e.Attributes, "layerQuantifier", ref _scratch)) {
+ // If this is a layer quantifier, quantify over layers here, and use $LS(ly) layers in the translation of the body
+ var ly = BplBoundVar(e.Refresh("q$ly#", translator.CurrentIdGenerator), predef.LayerType, bvars);
+ bodyEtran = new ExpressionTranslator(translator, predef, HeapExpr, This, applyLimited_CurrentFunction, new FuelSetting(translator, 1, ly), new FuelSetting(translator, 1, ly), modifiesFrame, stripLits);
+ }
+ if (Attributes.ContainsBool(e.Attributes, "heapQuantifier", ref _scratch)) {
+ var h = BplBoundVar(e.Refresh("q$heap#", translator.CurrentIdGenerator), predef.HeapType, bvars);
+ bodyEtran = new ExpressionTranslator(bodyEtran, h);
+ antecedent = BplAnd(new List<Bpl.Expr> {
+ antecedent,
+ translator.FunctionCall(e.tok, BuiltinFunction.IsGoodHeap, null, h),
+ translator.HeapSameOrSucc(initEtran.HeapExpr, h)
+ });
+ }
+
+ antecedent = BplAnd(antecedent, bodyEtran.TrBoundVariables(e.BoundVars, bvars));
+
+ Bpl.QKeyValue kv = TrAttributes(e.Attributes, "trigger");
+ Bpl.Trigger tr = null;
+ var argsEtran = bodyEtran.WithNoLits();
+ foreach (var aa in e.Attributes.AsEnumerable()) {
+ if (aa.Name == "trigger") {
List<Bpl.Expr> tt = new List<Bpl.Expr>();
foreach (var arg in aa.Args) {
tt.Add(argsEtran.TrExpr(arg));
}
tr = new Bpl.Trigger(expr.tok, true, tt, tr);
}
- if (e.Range != null) {
- antecedent = BplAnd(antecedent, bodyEtran.TrExpr(e.Range));
- }
- Bpl.Expr body = bodyEtran.TrExpr(e.Term);
+ }
+ if (e.Range != null) {
+ antecedent = BplAnd(antecedent, bodyEtran.TrExpr(e.Range));
+ }
+ Bpl.Expr body = bodyEtran.TrExpr(e.Term);
- if (e is ForallExpr) {
- return new Bpl.ForallExpr(expr.tok, new List<TypeVariable>(), Concat(tyvars, bvars), kv, tr, Bpl.Expr.Imp(antecedent, body));
- } else {
- Contract.Assert(e is ExistsExpr);
- return new Bpl.ExistsExpr(expr.tok, new List<TypeVariable>(), Concat(tyvars, bvars), kv, tr, Bpl.Expr.And(antecedent, body));
- }
+ if (e is ForallExpr) {
+ return new Bpl.ForallExpr(expr.tok, new List<TypeVariable>(), Concat(tyvars, bvars), kv, tr, Bpl.Expr.Imp(antecedent, body));
+ } else {
+ Contract.Assert(e is ExistsExpr);
+ return new Bpl.ExistsExpr(expr.tok, new List<TypeVariable>(), Concat(tyvars, bvars), kv, tr, Bpl.Expr.And(antecedent, body));
}
+
} else if (expr is SetComprehension) {
var e = (SetComprehension)expr;
// Translate "set xs | R :: T" into "lambda y: BoxType :: (exists xs :: CorrectType(xs) && R && y==Box(T))".
@@ -12994,8 +12964,8 @@ namespace Microsoft.Dafny {
/* NB: only for type-argument-less quantifiers for now: */
&& ((QuantifierExpr)expr).TypeArgs.Count == 0) {
var e = (QuantifierExpr)expr;
- var inductionVariables = ApplyInduction(e);
- if (apply_induction && 2 <= DafnyOptions.O.Induction && inductionVariables.Count != 0) {
+ var inductionVariables = ApplyInduction(e.BoundVars, e.Attributes);
+ if (apply_induction && inductionVariables.Count != 0) {
// From the given quantifier (forall n :: P(n)), generate the seemingly weaker proof obligation
// (forall n :: (forall k :: k < n ==> P(k)) ==> P(n))
// For an existential (exists n :: P(n)), it is
@@ -13200,298 +13170,29 @@ namespace Microsoft.Dafny {
return RefinementToken.IsInherited(expr.tok, currentModule) && (codeContext == null || !codeContext.MustReverify) && RefinementTransformer.ContainsChange(expr, currentModule);
}
- List<BoundVar> ApplyInduction(QuantifierExpr e) {
- Contract.Requires(e.SplitQuantifier == null); // Don't call this on a quantifier with a Split clause: it's not a real quantifier
- Contract.Requires(e.TypeArgs.Count == 0);
- return ApplyInduction(e.BoundVars, e.Attributes, new List<Expression>() { e.LogicalBody() },
- delegate(System.IO.TextWriter wr) { new Printer(wr).PrintExpression(e, true); });
- }
-
- delegate void TracePrinter(System.IO.TextWriter wr);
-
/// <summary>
/// Return a subset of "boundVars" (in the order giving in "boundVars") to which to apply induction to,
- /// according to :induction attributes in "attributes" and heuristically interesting subexpressions of
- /// "searchExprs".
+ /// according to the :_induction attribute in "attributes".
/// </summary>
- List<VarType> ApplyInduction<VarType>(List<VarType> boundVars, Attributes attributes, List<Expression> searchExprs, TracePrinter tracePrinter) where VarType : class, IVariable
+ List<VarType> ApplyInduction<VarType>(List<VarType> boundVars, Attributes attributes) where VarType : class, IVariable
{
Contract.Requires(boundVars != null);
- Contract.Requires(searchExprs != null);
- Contract.Requires(tracePrinter != null);
Contract.Ensures(Contract.Result<List<VarType>>() != null);
- if (DafnyOptions.O.Induction == 0) {
+ var args = Attributes.FindExpressions(attributes, "_induction");
+ if (args == null) {
return new List<VarType>(); // don't apply induction
}
- foreach (var a in attributes.AsEnumerable()) {
- if (a.Name == "induction") {
- // Here are the supported forms of the :induction attribute.
- // :induction -- apply induction to all bound variables
- // :induction false -- suppress induction, that is, don't apply it to any bound variable
- // :induction L where L is a list consisting entirely of bound variables:
- // -- apply induction to the specified bound variables
- // :induction X where X is anything else
- // -- treat the same as {:induction}, that is, apply induction to all
- // bound variables
-
- // Handle {:induction false}
- if (a.Args.Count == 1) {
- var arg = a.Args[0] as LiteralExpr;
- if (arg != null && arg.Value is bool && !(bool)arg.Value) {
- if (CommandLineOptions.Clo.Trace) {
- Console.Write("Suppressing automatic induction for: ");
- tracePrinter(Console.Out);
- Console.WriteLine();
- }
- return new List<VarType>();
- }
- }
-
- // Handle {:induction L}
- if (a.Args.Count != 0) {
- // check that all attribute arguments refer to bound variables; otherwise, go to default_form
- var argsAsVars = new List<VarType>();
- foreach (var arg in a.Args) {
- var theArg = arg.Resolved;
- if (theArg is ThisExpr) {
- foreach (var bv in boundVars) {
- if (bv is ThisSurrogate) {
- argsAsVars.Add(bv);
- goto TRY_NEXT_ATTRIBUTE_ARGUMENT;
- }
- }
- } else if (theArg is IdentifierExpr) {
- var id = (IdentifierExpr)theArg;
- var bv = id.Var as VarType;
- if (bv != null && boundVars.Contains(bv)) {
- argsAsVars.Add(bv);
- goto TRY_NEXT_ATTRIBUTE_ARGUMENT;
- }
- }
- // the attribute argument was not one of the possible induction variables
- goto USE_DEFAULT_FORM;
- TRY_NEXT_ATTRIBUTE_ARGUMENT:
- ;
- }
- // so, all attribute arguments are variables; add them to L in the order of the bound variables (not necessarily the order in the attribute)
- var L = new List<VarType>();
- foreach (var bv in boundVars) {
- if (argsAsVars.Contains(bv)) {
- L.Add(bv);
- }
- }
- if (CommandLineOptions.Clo.Trace) {
- string sep = "Applying requested induction on ";
- foreach (var bv in L) {
- Console.Write("{0}{1}", sep, bv.Name);
- sep = ", ";
- }
- Console.Write(" of: ");
- tracePrinter(Console.Out);
- Console.WriteLine();
- }
- return L;
- USE_DEFAULT_FORM: ;
- }
-
- // We have the {:induction} case, or something to be treated in the same way
- if (CommandLineOptions.Clo.Trace) {
- Console.Write("Applying requested induction on all bound variables of: ");
- tracePrinter(Console.Out);
- Console.WriteLine();
- }
- return boundVars;
- }
- }
-
- if (DafnyOptions.O.Induction < 2) {
- return new List<VarType>(); // don't apply induction
- }
-
- // consider automatically applying induction
- var inductionVariables = new List<VarType>();
- foreach (var n in boundVars) {
- if (!n.Type.IsTypeParameter && searchExprs.Exists(expr => VarOccursInArgumentToRecursiveFunction(expr, n))) {
- if (CommandLineOptions.Clo.Trace) {
- Console.Write("Applying automatic induction on variable '{0}' of: ", n.Name);
- tracePrinter(Console.Out);
- Console.WriteLine();
- }
- inductionVariables.Add(n);
- }
- }
-
- return inductionVariables;
- }
-
- /// <summary>
- /// Returns 'true' iff by looking at 'expr' the Induction Heuristic determines that induction should be applied to 'n'.
- /// More precisely:
- /// DafnyInductionHeuristic Return 'true'
- /// ----------------------- -------------
- /// 0 always
- /// 1 if 'n' occurs as any subexpression (of 'expr')
- /// 2 if 'n' occurs as any subexpression of any index argument of an array/sequence select expression or any argument to a recursive function
- /// 3 if 'n' occurs as a prominent subexpression of any index argument of an array/sequence select expression or any argument to a recursive function
- /// 4 if 'n' occurs as any subexpression of any argument to a recursive function
- /// 5 if 'n' occurs as a prominent subexpression of any argument to a recursive function
- /// 6 if 'n' occurs as a prominent subexpression of any decreases-influencing argument to a recursive function
- /// Parameter 'n' is allowed to be a ThisSurrogate.
- /// </summary>
- bool VarOccursInArgumentToRecursiveFunction(Expression expr, IVariable n) {
- switch (DafnyOptions.O.InductionHeuristic) {
- case 0: return true;
- case 1: return ContainsFreeVariable(expr, false, n);
- default: return VarOccursInArgumentToRecursiveFunction(expr, n, false);
- }
- }
-
- /// <summary>
- /// Worker routine for VarOccursInArgumentToRecursiveFunction(expr,n), where the additional parameter 'exprIsProminent' says whether or
- /// not 'expr' has prominent status in its context.
- /// DafnyInductionHeuristic cases 0 and 1 are assumed to be handled elsewhere (i.e., a precondition of this method is DafnyInductionHeuristic is at least 2).
- /// Parameter 'n' is allowed to be a ThisSurrogate.
- /// </summary>
- bool VarOccursInArgumentToRecursiveFunction(Expression expr, IVariable n, bool exprIsProminent) {
- Contract.Requires(expr != null);
- Contract.Requires(n != null);
-
- // The following variable is what gets passed down to recursive calls if the subexpression does not itself acquire prominent status.
- var subExprIsProminent = DafnyOptions.O.InductionHeuristic == 2 || DafnyOptions.O.InductionHeuristic == 4 ? /*once prominent, always prominent*/exprIsProminent : /*reset the prominent status*/false;
-
- if (expr is ThisExpr) {
- return exprIsProminent && n is ThisSurrogate;
- } else if (expr is IdentifierExpr) {
- var e = (IdentifierExpr)expr;
- return exprIsProminent && e.Var == n;
- } else if (expr is SeqSelectExpr) {
- var e = (SeqSelectExpr)expr;
- var q = DafnyOptions.O.InductionHeuristic < 4 || subExprIsProminent;
- return VarOccursInArgumentToRecursiveFunction(e.Seq, n, subExprIsProminent) || // this subexpression does not acquire "prominent" status
- (e.E0 != null && VarOccursInArgumentToRecursiveFunction(e.E0, n, q)) || // this one does (unless arrays/sequences are excluded)
- (e.E1 != null && VarOccursInArgumentToRecursiveFunction(e.E1, n, q)); // ditto
- } else if (expr is MultiSelectExpr) {
- var e = (MultiSelectExpr)expr;
- var q = DafnyOptions.O.InductionHeuristic < 4 || subExprIsProminent;
- return VarOccursInArgumentToRecursiveFunction(e.Array, n, subExprIsProminent) ||
- e.Indices.Exists(exp => VarOccursInArgumentToRecursiveFunction(exp, n, q));
- } else if (expr is FunctionCallExpr) {
- var e = (FunctionCallExpr)expr;
- // For recursive functions: arguments are "prominent"
- // For non-recursive function: arguments are "prominent" if the call is
- var rec = e.Function.IsRecursive && e.CoCall != FunctionCallExpr.CoCallResolution.Yes;
- var decr = e.Function.Decreases.Expressions;
- bool variantArgument;
- if (DafnyOptions.O.InductionHeuristic < 6) {
- variantArgument = rec;
- } else {
- // The receiver is considered to be "variant" if the function is recursive and the receiver participates
- // in the effective decreases clause of the function. The receiver participates if it's a free variable
- // of a term in the explicit decreases clause.
- variantArgument = rec && decr.Exists(ee => ContainsFreeVariable(ee, true, null));
- }
- if (VarOccursInArgumentToRecursiveFunction(e.Receiver, n, variantArgument || subExprIsProminent)) {
- return true;
- }
- Contract.Assert(e.Function.Formals.Count == e.Args.Count);
- for (int i = 0; i < e.Function.Formals.Count; i++) {
- var f = e.Function.Formals[i];
- var exp = e.Args[i];
- if (DafnyOptions.O.InductionHeuristic < 6) {
- variantArgument = rec;
- } else if (rec) {
- // The argument position is considered to be "variant" if the function is recursive and...
- // ... it has something to do with why the callee is well-founded, which happens when...
- if (f is ImplicitFormal) {
- // ... it is the argument is the implicit _k parameter, which is always first in the effective decreases clause of a prefix lemma, or
- variantArgument = true;
- } else if (decr.Exists(ee => ContainsFreeVariable(ee, false, f))) {
- // ... it participates in the effective decreases clause of the function, which happens when it is
- // a free variable of a term in the explicit decreases clause, or
- variantArgument = true;
- } else {
- // ... the callee is a prefix predicate.
- variantArgument = true;
- }
- }
- if (VarOccursInArgumentToRecursiveFunction(exp, n, variantArgument || subExprIsProminent)) {
- return true;
- }
- }
- return false;
- } else if (expr is TernaryExpr) {
- var e = (TernaryExpr)expr;
- switch (e.Op) {
- case TernaryExpr.Opcode.PrefixEqOp:
- case TernaryExpr.Opcode.PrefixNeqOp:
- return VarOccursInArgumentToRecursiveFunction(e.E0, n, true) ||
- VarOccursInArgumentToRecursiveFunction(e.E1, n, subExprIsProminent) ||
- VarOccursInArgumentToRecursiveFunction(e.E2, n, subExprIsProminent);
- default:
- Contract.Assert(false); throw new cce.UnreachableException(); // unexpected ternary expression
- }
- } else if (expr is DatatypeValue) {
- var e = (DatatypeValue)expr;
- var q = n.Type.IsDatatype ? exprIsProminent : subExprIsProminent; // prominent status continues, if we're looking for a variable whose type is a datatype
- return e.Arguments.Exists(exp => VarOccursInArgumentToRecursiveFunction(exp, n, q));
- } else if (expr is UnaryExpr) {
- var e = (UnaryExpr)expr;
- // both Not and SeqLength preserve prominence
- return VarOccursInArgumentToRecursiveFunction(e.E, n, exprIsProminent);
- } else if (expr is BinaryExpr) {
- var e = (BinaryExpr)expr;
- bool q;
- switch (e.ResolvedOp) {
- case BinaryExpr.ResolvedOpcode.Add:
- case BinaryExpr.ResolvedOpcode.Sub:
- case BinaryExpr.ResolvedOpcode.Mul:
- case BinaryExpr.ResolvedOpcode.Div:
- case BinaryExpr.ResolvedOpcode.Mod:
- case BinaryExpr.ResolvedOpcode.Union:
- case BinaryExpr.ResolvedOpcode.Intersection:
- case BinaryExpr.ResolvedOpcode.SetDifference:
- case BinaryExpr.ResolvedOpcode.Concat:
- // these operators preserve prominence
- q = exprIsProminent;
- break;
- default:
- // whereas all other binary operators do not
- q = subExprIsProminent;
- break;
- }
- return VarOccursInArgumentToRecursiveFunction(e.E0, n, q) ||
- VarOccursInArgumentToRecursiveFunction(e.E1, n, q);
- } else if (expr is StmtExpr) {
- var e = (StmtExpr)expr;
- // ignore the statement
- return VarOccursInArgumentToRecursiveFunction(e.E, n);
-
- } else if (expr is ITEExpr) {
- var e = (ITEExpr)expr;
- return VarOccursInArgumentToRecursiveFunction(e.Test, n, subExprIsProminent) || // test is not "prominent"
- VarOccursInArgumentToRecursiveFunction(e.Thn, n, exprIsProminent) || // but the two branches are
- VarOccursInArgumentToRecursiveFunction(e.Els, n, exprIsProminent);
- } else if (expr is OldExpr ||
- expr is ConcreteSyntaxExpression ||
- expr is BoxingCastExpr ||
- expr is UnboxingCastExpr) {
- foreach (var exp in expr.SubExpressions) {
- if (VarOccursInArgumentToRecursiveFunction(exp, n, exprIsProminent)) { // maintain prominence
- return true;
- }
- }
- return false;
- } else {
- // in all other cases, reset the prominence status and recurse on the subexpressions
- foreach (var exp in expr.SubExpressions) {
- if (VarOccursInArgumentToRecursiveFunction(exp, n, subExprIsProminent)) {
- return true;
- }
- }
- return false;
+ var argsAsVars = new List<VarType>();
+ foreach (var arg in args) {
+ // We expect each "arg" to be an IdentifierExpr among "boundVars"
+ var id = (IdentifierExpr)arg;
+ var bv = (VarType)id.Var;
+ Contract.Assume(boundVars.Contains(bv));
+ argsAsVars.Add(bv);
}
+ return argsAsVars;
}
IEnumerable<Bpl.Expr> InductionCases(Type ty, Bpl.Expr expr, ExpressionTranslator etran) {
@@ -13538,12 +13239,12 @@ namespace Microsoft.Dafny {
/// Parameter 'v' is allowed to be a ThisSurrogate, in which case the method returns true iff 'this'
/// occurs in 'expr'.
/// </summary>
- static bool ContainsFreeVariable(Expression expr, bool lookForReceiver, IVariable v) {
+ public static bool ContainsFreeVariable(Expression expr, bool lookForReceiver, IVariable v) {
Contract.Requires(expr != null);
Contract.Requires(lookForReceiver || v != null);
if (expr is ThisExpr) {
- return lookForReceiver || v is ThisSurrogate;
+ return lookForReceiver;
} else if (expr is IdentifierExpr) {
IdentifierExpr e = (IdentifierExpr)expr;
return e.Var == v;
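Note on the Translator change above: the in-translator induction heuristic (ApplyInduction's searchExprs/tracePrinter machinery and VarOccursInArgumentToRecursiveFunction) is removed, and ApplyInduction now only reads back a pre-computed :_induction attribute whose arguments must be identifiers of the quantifier's bound variables. A hedged sketch, in Dafny comment form, of the shape that attribute is expected to take after the rewriting phase (the exact spelling is inferred from the FindExpressions("_induction") lookup above, not stated elsewhere in this patch):

// Illustration only (hypothetical quantifier):
//   forall n: nat :: 0 <= F(n)
// is expected to reach the Translator carrying a resolver-generated attribute such as
//   forall n: nat {:_induction n} :: 0 <= F(n)
// so that ApplyInduction simply returns [n] instead of re-running the heuristic here.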
diff --git a/Source/DafnyServer/DafnyHelper.cs b/Source/DafnyServer/DafnyHelper.cs
index d6c3f5c7..952f71c5 100644
--- a/Source/DafnyServer/DafnyHelper.cs
+++ b/Source/DafnyServer/DafnyHelper.cs
@@ -62,7 +62,7 @@ namespace Microsoft.Dafny {
}
private bool Translate() {
- var translator = new Dafny.Translator(reporter) { InsertChecksums = true, UniqueIdPrefix = null }; //FIXME check if null is OK for UniqueIdPrefix
+ var translator = new Dafny.Translator(reporter) { InsertChecksums = true, UniqueIdPrefix = fname };
boogieProgram = translator.Translate(dafnyProgram); // FIXME how are translation errors reported?
return true;
}
diff --git a/Source/DafnyServer/Utilities.cs b/Source/DafnyServer/Utilities.cs
index 96254a3d..5bd6e7d5 100644
--- a/Source/DafnyServer/Utilities.cs
+++ b/Source/DafnyServer/Utilities.cs
@@ -53,7 +53,7 @@ namespace Microsoft.Dafny {
Dafny.DafnyOptions.O.UnicodeOutput = true;
Dafny.DafnyOptions.O.VerifySnapshots = 2;
Dafny.DafnyOptions.O.VcsCores = Math.Max(1, System.Environment.ProcessorCount - 1);
- Dafny.DafnyOptions.Clo.Trace = trace;
+ Dafny.DafnyOptions.O.Trace = trace;
} else {
throw new ServerException("Invalid command line options");
}
diff --git a/Test/VerifyThis2015/Problem1.dfy b/Test/VerifyThis2015/Problem1.dfy
index 2e8a5243..67eeba07 100644
--- a/Test/VerifyThis2015/Problem1.dfy
+++ b/Test/VerifyThis2015/Problem1.dfy
@@ -161,18 +161,13 @@ lemma Same2<T>(pat: seq<T>, a: seq<T>)
ensures IRP_Alt(pat, a)
{
if pat == [] {
- assert pat <= a;
} else if a != [] && pat[0] == a[0] {
- assert IsRelaxedPrefixAux(pat[1..], a[1..], 1);
- Same2(pat[1..], a[1..]);
if pat[1..] <= a[1..] {
- assert pat <= a;
} else {
var k :| 0 <= k < |pat[1..]| && pat[1..][..k] + pat[1..][k+1..] <= a[1..];
assert 0 <= k+1 < |pat| && pat[..k+1] + pat[k+2..] <= a;
}
} else {
- assert IsRelaxedPrefixAux(pat[1..], a, 0);
Same2_Prefix(pat[1..], a);
assert pat[1..] <= a;
assert 0 <= 0 < |pat| && pat[..0] + pat[0+1..] <= a;
@@ -182,7 +177,4 @@ lemma Same2_Prefix<T>(pat: seq<T>, a: seq<T>)
requires IsRelaxedPrefixAux(pat, a, 0)
ensures pat <= a
{
- if pat != [] {
- Same2_Prefix(pat[1..], a[1..]);
- }
}
diff --git a/Test/dafny0/InductivePredicates.dfy b/Test/dafny0/InductivePredicates.dfy
index 424118e7..8d05af11 100644
--- a/Test/dafny0/InductivePredicates.dfy
+++ b/Test/dafny0/InductivePredicates.dfy
@@ -18,7 +18,7 @@ lemma M(x: natinf)
}
// yay! my first proof involving an inductive predicate :)
-lemma M'(k: nat, x: natinf)
+lemma {:induction false} M'(k: nat, x: natinf)
requires Even#[k](x)
ensures x.N? && x.n % 2 == 0
{
@@ -32,8 +32,14 @@ lemma M'(k: nat, x: natinf)
}
}
+lemma M'_auto(k: nat, x: natinf)
+ requires Even#[k](x)
+ ensures x.N? && x.n % 2 == 0
+{
+}
+
// Here is the same proof as in M / M', but packaged into a single "inductive lemma":
-inductive lemma IL(x: natinf)
+inductive lemma {:induction false} IL(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -45,7 +51,7 @@ inductive lemma IL(x: natinf)
}
}
-inductive lemma IL_EvenBetter(x: natinf)
+inductive lemma {:induction false} IL_EvenBetter(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -57,6 +63,12 @@ inductive lemma IL_EvenBetter(x: natinf)
}
}
+inductive lemma IL_Best(x: natinf)
+ requires Even(x)
+ ensures x.N? && x.n % 2 == 0
+{
+}
+
inductive lemma IL_Bad(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
@@ -107,7 +119,7 @@ module Alt {
{
match x
case N(n) => N(n+1)
- case Inf => Inf
+ case Inf => Inf
}
inductive predicate Even(x: natinf)
@@ -116,7 +128,7 @@ module Alt {
exists y :: x == S(S(y)) && Even(y)
}
- inductive lemma MyLemma_NotSoNice(x: natinf)
+ inductive lemma {:induction false} MyLemma_NotSoNice(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -130,7 +142,7 @@ module Alt {
}
}
- inductive lemma MyLemma_NiceButNotFast(x: natinf)
+ inductive lemma {:induction false} MyLemma_NiceButNotFast(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -143,7 +155,13 @@ module Alt {
assert x.n == y.n + 2;
}
}
-
+
+ inductive lemma MyLemma_RealNice_AndFastToo(x: natinf)
+ requires Even(x)
+ ensures x.N? && x.n % 2 == 0
+ {
+ }
+
lemma InfNotEven()
ensures !Even(Inf)
{
@@ -156,15 +174,6 @@ module Alt {
requires Even(Inf)
ensures false
{
- var x := Inf;
- if {
- case x.N? && x.n == 0 =>
- assert false; // this case is absurd
- case exists y :: x == S(S(y)) && Even(y) =>
- var y :| x == S(S(y)) && Even(y);
- assert y == Inf;
- InfNotEven_Aux();
- }
}
lemma NextEven(x: natinf)
diff --git a/Test/dafny0/InductivePredicates.dfy.expect b/Test/dafny0/InductivePredicates.dfy.expect
index ccf30643..48beade5 100644
--- a/Test/dafny0/InductivePredicates.dfy.expect
+++ b/Test/dafny0/InductivePredicates.dfy.expect
@@ -1,9 +1,9 @@
-InductivePredicates.dfy(64,9): Error: assertion violation
+InductivePredicates.dfy(76,9): Error: assertion violation
Execution trace:
(0,0): anon0
(0,0): anon3_Then
-InductivePredicates.dfy(76,10): Error: assertion violation
+InductivePredicates.dfy(88,10): Error: assertion violation
Execution trace:
(0,0): anon0
-Dafny program verifier finished with 29 verified, 2 errors
+Dafny program verifier finished with 35 verified, 2 errors
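The InductivePredicates changes follow one pattern: lemmas that spell out their induction manually now carry {:induction false} so the newly automatic induction does not mask the manual proof, while the new companions (M'_auto, IL_Best, MyLemma_RealNice_AndFastToo) verify with empty bodies. A minimal sketch of the same idea outside the test suite (hypothetical Fib/FibPositive names; assumes the default heuristic picks n for induction, which it should since n feeds a recursive function in the postcondition):

function Fib(n: nat): nat
{
  if n < 2 then n else Fib(n-2) + Fib(n-1)
}

lemma {:induction false} FibPositive_Manual(n: nat)
  requires 1 <= n
  ensures 1 <= Fib(n)
{
  if n == 1 {
    // base case: Fib(1) == 1
  } else {
    FibPositive_Manual(n-1);  // explicit induction hypothesis: 1 <= Fib(n-1)
  }
}

lemma FibPositive_Auto(n: nat)
  requires 1 <= n
  ensures 1 <= Fib(n)
{
  // empty body: the induction hypothesis is generated automatically
}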
diff --git a/Test/dafny0/Parallel.dfy b/Test/dafny0/Parallel.dfy
index 030eb350..e0d6491b 100644
--- a/Test/dafny0/Parallel.dfy
+++ b/Test/dafny0/Parallel.dfy
@@ -2,13 +2,13 @@
// RUN: %diff "%s.expect" "%t"
class C {
- var data: int;
- var n: nat;
- var st: set<object>;
+ var data: int
+ var n: nat
+ var st: set<object>
ghost method CLemma(k: int)
- requires k != -23;
- ensures data < k; // magic, isn't it (or bogus, some would say)
+ requires k != -23
+ ensures data < k // magic, isn't it (or bogus, some would say)
}
// This method more or less just tests the syntax, resolution, and basic verification
@@ -19,31 +19,31 @@ method ParallelStatement_Resolve(
S: set<int>,
clx: C, cly: C, clk: int
)
- requires a != null && null !in spine;
- modifies a, spine;
+ requires a != null && null !in spine
+ modifies a, spine
{
- forall (i: int | 0 <= i < a.Length && i % 2 == 0) {
+ forall i | 0 <= i < a.Length && i % 2 == 0 {
a[i] := a[(i + 1) % a.Length] + 3;
}
- forall (o | o in spine) {
+ forall o | o in spine {
o.st := o.st + Repr;
}
- forall (x, y | x in S && 0 <= y+x < 100) {
+ forall x, y | x in S && 0 <= y+x < 100 {
Lemma(clx, x, y); // error: precondition does not hold (clx may be null)
}
- forall (x, y | x in S && 0 <= y+x < 100) {
+ forall x, y | x in S && 0 <= y+x < 100 {
cly.CLemma(x + y); // error: receiver might be null
}
- forall (p | 0 <= p)
- ensures F(p) <= Sum(p) + p - 1; // error (no connection is known between F and Sum)
+ forall p | 0 <= p
+ ensures F(p) <= Sum(p) + p - 1 // error (no connection is known between F and Sum)
{
assert 0 <= G(p);
ghost var t;
- if (p % 2 == 0) {
+ if p % 2 == 0 {
assert G(p) == F(p+2); // error (there's nothing that gives any relation between F and G)
t := p+p;
} else {
@@ -56,11 +56,11 @@ method ParallelStatement_Resolve(
}
}
-ghost method Lemma(c: C, x: int, y: int)
- requires c != null;
- ensures c.data <= x+y;
-ghost method PowerLemma(x: int, y: int)
- ensures Pred(x, y);
+lemma Lemma(c: C, x: int, y: int)
+ requires c != null
+ ensures c.data <= x+y
+lemma PowerLemma(x: int, y: int)
+ ensures Pred(x, y)
function F(x: int): int
function G(x: int): nat
@@ -71,54 +71,54 @@ function Pred(x: int, y: int): bool
// ---------------------------------------------------------------------
method M0(S: set<C>)
- requires null !in S;
- modifies S;
- ensures forall o :: o in S ==> o.data == 85;
- ensures forall o :: o != null && o !in S ==> o.data == old(o.data);
+ requires null !in S
+ modifies S
+ ensures forall o :: o in S ==> o.data == 85
+ ensures forall o :: o != null && o !in S ==> o.data == old(o.data)
{
- forall (s | s in S) {
+ forall s | s in S {
s.data := 85;
}
}
method M1(S: set<C>, x: C)
- requires null !in S && x in S;
+ requires null !in S && x in S
{
- forall (s | s in S)
- ensures s.data < 100;
+ forall s | s in S
+ ensures s.data < 100
{
assume s.data == 85;
}
- if (*) {
+ if * {
assert x.data == 85; // error (cannot be inferred from forall ensures clause)
} else {
assert x.data < 120;
}
- forall (s | s in S)
- ensures s.data < 70; // error
+ forall s | s in S
+ ensures s.data < 70 // error
{
assume s.data == 85;
}
}
method M2() returns (a: array<int>)
- ensures a != null;
- ensures forall i,j :: 0 <= i < a.Length/2 <= j < a.Length ==> a[i] < a[j];
+ ensures a != null
+ ensures forall i,j :: 0 <= i < a.Length/2 <= j < a.Length ==> a[i] < a[j]
{
a := new int[250];
- forall (i: nat | i < 125) {
+ forall i: nat | i < 125 {
a[i] := 423;
}
- forall (i | 125 <= i < 250) {
+ forall i | 125 <= i < 250 {
a[i] := 300 + i;
}
}
method M4(S: set<C>, k: int)
- modifies S;
+ modifies S
{
- forall (s | s in S && s != null) {
+ forall s | s in S && s != null {
s.n := k; // error: k might be negative
}
}
@@ -127,25 +127,25 @@ method M5()
{
if {
case true =>
- forall (x | 0 <= x < 100) {
+ forall x | 0 <= x < 100 {
PowerLemma(x, x);
}
assert Pred(34, 34);
case true =>
- forall (x,y | 0 <= x < 100 && y == x+1) {
+ forall x,y | 0 <= x < 100 && y == x+1 {
PowerLemma(x, y);
}
assert Pred(34, 35);
case true =>
- forall (x,y | 0 <= x < y < 100) {
+ forall x,y | 0 <= x < y < 100 {
PowerLemma(x, y);
}
assert Pred(34, 35);
case true =>
- forall (x | x in set k | 0 <= k < 100) {
+ forall x | x in set k | 0 <= k < 100 {
PowerLemma(x, x);
}
assert Pred(34, 34);
@@ -155,22 +155,22 @@ method M5()
method Main()
{
var a := new int[180];
- forall (i | 0 <= i < 180) {
+ forall i | 0 <= i < 180 {
a[i] := 2*i + 100;
}
var sq := [0, 0, 0, 2, 2, 2, 5, 5, 5];
- forall (i | 0 <= i < |sq|) {
+ forall i | 0 <= i < |sq| {
a[20+i] := sq[i];
}
- forall (t | t in sq) {
+ forall t | t in sq {
a[t] := 1000;
}
- forall (t,u | t in sq && t < 4 && 10 <= u < 10+t) {
+ forall t,u | t in sq && t < 4 && 10 <= u < 10+t {
a[u] := 6000 + t;
}
var k := 0;
- while (k < 180) {
- if (k != 0) { print ", "; }
+ while k < 180 {
+ if k != 0 { print ", "; }
print a[k];
k := k + 1;
}
@@ -180,50 +180,50 @@ method Main()
method DuplicateUpdate() {
var a := new int[180];
var sq := [0, 0, 0, 2, 2, 2, 5, 5, 5];
- if (*) {
- forall (t,u | t in sq && 10 <= u < 10+t) {
+ if * {
+ forall t,u | t in sq && 10 <= u < 10+t {
a[u] := 6000 + t; // error: a[10] (and a[11]) are assigned more than once
}
} else {
- forall (t,u | t in sq && t < 4 && 10 <= u < 10+t) {
+ forall t,u | t in sq && t < 4 && 10 <= u < 10+t {
a[u] := 6000 + t; // with the 't < 4' conjunct in the line above, this is fine
}
}
}
-ghost method DontDoMuch(x: int)
+lemma DontDoMuch(x: int)
{
}
method OmittedRange() {
- forall (x: int) { } // a type is still needed for the bound variable
- forall (x) {
+ forall x: int { } // a type is still needed for the bound variable
+ forall x {
DontDoMuch(x);
}
}
// ----------------------- two-state postconditions ---------------------------------
-class TwoState_C { ghost var data: int; }
+class TwoState_C { ghost var data: int }
// It is not possible to achieve this postcondition in a ghost method, because ghost
// contexts are not allowed to allocate state. Callers of this ghost method will know
// that the postcondition is tantamount to 'false'.
ghost method TwoState0(y: int)
- ensures exists o: TwoState_C :: o != null && fresh(o);
+ ensures exists o: TwoState_C :: o != null && fresh(o)
method TwoState_Main0() {
- forall (x) { TwoState0(x); }
+ forall x { TwoState0(x); }
assert false; // no prob, because the postcondition of TwoState0 implies false
}
method X_Legit(c: TwoState_C)
- requires c != null;
- modifies c;
+ requires c != null
+ modifies c
{
c.data := c.data + 1;
- forall (x | c.data <= x)
- ensures old(c.data) < x; // note that the 'old' refers to the method's initial state
+ forall x | c.data <= x
+ ensures old(c.data) < x // note that the 'old' refers to the method's initial state
{
}
}
@@ -235,8 +235,8 @@ method X_Legit(c: TwoState_C)
// method, not the beginning of the 'forall' statement.
method TwoState_Main2()
{
- forall (x: int)
- ensures exists o: TwoState_C :: o != null && fresh(o);
+ forall x: int
+ ensures exists o: TwoState_C :: o != null && fresh(o)
{
TwoState0(x);
}
@@ -251,8 +251,8 @@ method TwoState_Main2()
// statement's effect on the heap is not optimized away.
method TwoState_Main3()
{
- forall (x: int)
- ensures exists o: TwoState_C :: o != null && fresh(o);
+ forall x: int
+ ensures exists o: TwoState_C :: o != null && fresh(o)
{
assume false; // (there's no other way to achieve this forall-statement postcondition)
}
@@ -262,11 +262,11 @@ method TwoState_Main3()
// ------- empty forall statement -----------------------------------------
class EmptyForallStatement {
- var emptyPar: int;
+ var emptyPar: int
method Empty_Parallel0()
- modifies this;
- ensures emptyPar == 8;
+ modifies this
+ ensures emptyPar == 8
{
forall () {
this.emptyPar := 8;
@@ -274,11 +274,11 @@ class EmptyForallStatement {
}
function EmptyPar_P(x: int): bool
- ghost method EmptyPar_Lemma(x: int)
- ensures EmptyPar_P(x);
+ lemma EmptyPar_Lemma(x: int)
+ ensures EmptyPar_P(x)
method Empty_Parallel1()
- ensures EmptyPar_P(8);
+ ensures EmptyPar_P(8)
{
forall {
EmptyPar_Lemma(8);
@@ -288,7 +288,7 @@ class EmptyForallStatement {
method Empty_Parallel2()
{
forall
- ensures exists k :: EmptyPar_P(k);
+ ensures exists k :: EmptyPar_P(k)
{
var y := 8;
assume EmptyPar_P(y);
@@ -312,9 +312,9 @@ predicate ThProperty(step: nat, t: Nat, r: nat)
case Succ(o) => step>0 && exists ro:nat :: ThProperty(step-1, o, ro)
}
-ghost method Th(step: nat, t: Nat, r: nat)
- requires t.Succ? && ThProperty(step, t, r);
+lemma Th(step: nat, t: Nat, r: nat)
+ requires t.Succ? && ThProperty(step, t, r)
// the next line follows from the precondition and the definition of ThProperty
- ensures exists ro:nat :: ThProperty(step-1, t.tail, ro);
+ ensures exists ro:nat :: ThProperty(step-1, t.tail, ro)
{
}
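The Parallel.dfy edits are mostly concrete-syntax modernization: forall statements lose the parentheses around their bound variables, specification clauses lose their trailing semicolons, and ghost methods that are really lemmas are declared with the lemma keyword. A small self-contained sketch of the updated forall-statement form (hypothetical ZeroOut method, not part of the test):

method ZeroOut(a: array<int>)
  requires a != null
  modifies a
  ensures forall i :: 0 <= i < a.Length ==> a[i] == 0
{
  // previously written as: forall (i | 0 <= i < a.Length) { a[i] := 0; }
  forall i | 0 <= i < a.Length {
    a[i] := 0;
  }
}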
diff --git a/Test/dafny3/Filter.dfy b/Test/dafny3/Filter.dfy
index 24c8e94e..6f541396 100644
--- a/Test/dafny3/Filter.dfy
+++ b/Test/dafny3/Filter.dfy
@@ -35,7 +35,7 @@ lemma Lemma_TailSubStreamK(s: Stream, u: Stream, k: nat) // this lemma could ha
{
if k != 0 {
Lemma_InTail(s.head, u);
- Lemma_TailSubStreamK(s.tail, u, k-1);
+ //Lemma_TailSubStreamK(s.tail, u, k-1);
}
}
lemma Lemma_InSubStream<T>(x: T, s: Stream<T>, u: Stream<T>)
@@ -53,104 +53,119 @@ lemma Lemma_InSubStream<T>(x: T, s: Stream<T>, u: Stream<T>)
}
}
-type PredicateHandle
-predicate P<T>(x: T, h: PredicateHandle)
+type Predicate<T> = T -> bool
-copredicate AllP(s: Stream, h: PredicateHandle)
+predicate Total<T,U>(f: T -> U)
+ reads f.reads
{
- P(s.head, h) && AllP(s.tail, h)
+ forall t :: f.reads(t) == {} && f.requires(t)
}
-lemma Lemma_InAllP<T>(x: T, s: Stream<T>, h: PredicateHandle)
- requires In(x, s) && AllP(s, h);
- ensures P(x, h);
+
+copredicate AllP(s: Stream, P: Predicate)
+ requires Total(P)
+{
+ P(s.head) && AllP(s.tail, P)
+}
+lemma Lemma_InAllP<T>(x: T, s: Stream<T>, P: Predicate)
+ requires Total(P)
+ requires In(x, s) && AllP(s, P)
+ ensures P(x)
{
var n :| 0 <= n && Tail(s, n).head == x;
var t := s;
while n != 0
- invariant 0 <= n;
- invariant Tail(t, n).head == x;
- invariant AllP(t, h);
+ invariant 0 <= n
+ invariant Tail(t, n).head == x
+ invariant AllP(t, P)
{
t, n := t.tail, n - 1;
}
}
-predicate IsAnother(s: Stream, h: PredicateHandle)
+predicate IsAnother(s: Stream, P: Predicate)
+ requires Total(P)
{
- exists n :: 0 <= n && P(Tail(s, n).head, h)
+ exists n :: 0 <= n && P(Tail(s, n).head)
}
-copredicate AlwaysAnother(s: Stream, h: PredicateHandle)
+copredicate AlwaysAnother(s: Stream, P: Predicate)
+ requires Total(P)
{
- IsAnother(s, h) && AlwaysAnother(s.tail, h)
+ IsAnother(s, P) && AlwaysAnother(s.tail, P)
}
-colemma Lemma_AllImpliesAlwaysAnother(s: Stream, h: PredicateHandle)
- requires AllP(s, h);
- ensures AlwaysAnother(s, h);
+colemma Lemma_AllImpliesAlwaysAnother(s: Stream, P: Predicate)
+ requires Total(P)
+ requires AllP(s, P)
+ ensures AlwaysAnother(s, P)
{
assert Tail(s, 0) == s;
}
-function Next(s: Stream, h: PredicateHandle): nat
- requires AlwaysAnother(s, h);
- ensures P(Tail(s, Next(s, h)).head, h);
- ensures forall i :: 0 <= i < Next(s, h) ==> !P(Tail(s, i).head, h);
+function Next(s: Stream, P: Predicate): nat
+ requires Total(P)
+ requires AlwaysAnother(s, P)
+ ensures P(Tail(s, Next(s, P)).head)
+ ensures forall i :: 0 <= i < Next(s, P) ==> !P(Tail(s, i).head)
{
- var n :| 0 <= n && P(Tail(s, n).head, h);
- NextMinimizer(s, h, n)
+ var n :| 0 <= n && P(Tail(s, n).head);
+ NextMinimizer(s, P, n)
}
// the following is an auxiliary function of the definition of Next
-function NextMinimizer(s: Stream, h: PredicateHandle, n: nat): nat
- requires P(Tail(s, n).head, h);
- ensures P(Tail(s, NextMinimizer(s, h, n)).head, h);
- ensures forall i :: 0 <= i < NextMinimizer(s, h, n) ==> !P(Tail(s, i).head, h);
+function NextMinimizer(s: Stream, P: Predicate, n: nat): nat
+ requires Total(P)
+ requires P(Tail(s, n).head)
+ ensures P(Tail(s, NextMinimizer(s, P, n)).head)
+ ensures forall i :: 0 <= i < NextMinimizer(s, P, n) ==> !P(Tail(s, i).head)
{
- if forall i :: 0 <= i < n ==> !P(Tail(s, i).head, h) then
+ if forall i :: 0 <= i < n ==> !P(Tail(s, i).head) then
n
else
- var k :| 0 <= k < n && P(Tail(s, k).head, h);
- NextMinimizer(s, h, k)
+ var k :| 0 <= k < n && P(Tail(s, k).head);
+ NextMinimizer(s, P, k)
}
-function Filter(s: Stream, h: PredicateHandle): Stream
- requires AlwaysAnother(s, h);
- decreases Next(s, h);
+function Filter(s: Stream, P: Predicate): Stream
+ requires Total(P)
+ requires AlwaysAnother(s, P)
+ decreases Next(s, P)
{
- if P(s.head, h) then
- Cons(s.head, Filter(s.tail, h))
+ if P(s.head) then
+ Cons(s.head, Filter(s.tail, P))
else
- Filter(s.tail, h)
+ Filter(s.tail, P)
}
// properties about Filter
-colemma Filter_AlwaysAnother(s: Stream, h: PredicateHandle)
- requires AlwaysAnother(s, h);
- ensures AllP(Filter(s, h), h);
- decreases Next(s, h);
+colemma Filter_AlwaysAnother(s: Stream, P: Predicate)
+ requires Total(P)
+ requires AlwaysAnother(s, P)
+ ensures AllP(Filter(s, P), P)
+ decreases Next(s, P)
{
- if P(s.head, h) {
- Filter_AlwaysAnother(s.tail, h);
+ if P(s.head) {
+ Filter_AlwaysAnother(s.tail, P);
} else {
- Filter_AlwaysAnother#[_k](s.tail, h);
+ Filter_AlwaysAnother#[_k](s.tail, P);
}
}
-colemma Filter_IsSubStream(s: Stream, h: PredicateHandle)
- requires AlwaysAnother(s, h);
- ensures IsSubStream(Filter(s, h), s);
- decreases Next(s, h);
+colemma Filter_IsSubStream(s: Stream, P: Predicate)
+ requires Total(P)
+ requires AlwaysAnother(s, P)
+ ensures IsSubStream(Filter(s, P), s)
+ decreases Next(s, P)
{
- if P(s.head, h) {
+ if P(s.head) {
// To prove IsSubStream#[_k](Filter(s, P), s), we prove the two conjuncts from the definition
calc {
true;
- == { Filter_IsSubStream(s.tail, h); } // induction hypothesis
- IsSubStream#[_k-1](Filter(s.tail, h), s.tail);
+ == { Filter_IsSubStream(s.tail, P); } // induction hypothesis
+ IsSubStream#[_k-1](Filter(s.tail, P), s.tail);
== // { assert Filter(s.tail, P) == Filter(s, P).tail; }
- IsSubStream#[_k-1](Filter(s, h).tail, s.tail);
- ==> { Lemma_TailSubStreamK(Filter(s, h).tail, s, _k-1); }
- IsSubStream#[_k-1](Filter(s, h).tail, s);
+ IsSubStream#[_k-1](Filter(s, P).tail, s.tail);
+ ==> { Lemma_TailSubStreamK(Filter(s, P).tail, s, _k-1); }
+ IsSubStream#[_k-1](Filter(s, P).tail, s);
}
calc {
- In(Filter(s, h).head, s);
- == { assert Filter(s, h) == Cons(s.head, Filter(s.tail, h)); }
+ In(Filter(s, P).head, s);
+ == { assert Filter(s, P) == Cons(s.head, Filter(s.tail, P)); }
In(s.head, s);
== { assert Tail(s, 0) == s;
assert exists n :: 0 <= n && Tail(s, n).head == s.head;
@@ -158,105 +173,114 @@ colemma Filter_IsSubStream(s: Stream, h: PredicateHandle)
true;
}
} else {
- Lemma_TailSubStreamK(Filter(s.tail, h), s, _k);
+ Lemma_TailSubStreamK(Filter(s.tail, P), s, _k);
}
}
// The following says nothing about the order of the elements in the stream
-lemma Theorem_Filter<T>(s: Stream<T>, h: PredicateHandle)
- requires AlwaysAnother(s, h);
- ensures forall x :: In(x, Filter(s, h)) <==> In(x, s) && P(x, h);
+lemma Theorem_Filter<T>(s: Stream<T>, P: Predicate)
+ requires Total(P)
+ requires AlwaysAnother(s, P)
+ ensures forall x :: In(x, Filter(s, P)) <==> In(x, s) && P(x)
{
forall x
- ensures In(x, Filter(s, h)) <==> In(x, s) && P(x, h);
+ ensures In(x, Filter(s, P)) <==> In(x, s) && P(x)
{
- if In(x, Filter(s, h)) {
- FS_Ping(s, h, x);
+ if In(x, Filter(s, P)) {
+ FS_Ping(s, P, x);
}
- if In(x, s) && P(x, h) {
+ if In(x, s) && P(x) {
var k :| 0 <= k && Tail(s, k).head == x;
- FS_Pong(s, h, x, k);
+ FS_Pong(s, P, x, k);
}
}
}
-lemma FS_Ping<T>(s: Stream<T>, h: PredicateHandle, x: T)
- requires AlwaysAnother(s, h) && In(x, Filter(s, h));
- ensures In(x, s) && P(x, h);
+lemma FS_Ping<T>(s: Stream<T>, P: Predicate, x: T)
+ requires Total(P)
+ requires AlwaysAnother(s, P) && In(x, Filter(s, P))
+ ensures In(x, s) && P(x)
{
- Filter_IsSubStream(s, h);
- Lemma_InSubStream(x, Filter(s, h), s);
+ Filter_IsSubStream(s, P);
+ Lemma_InSubStream(x, Filter(s, P), s);
- Filter_AlwaysAnother(s, h);
- assert AllP(Filter(s, h), h);
- Lemma_InAllP(x, Filter(s, h), h);
+ Filter_AlwaysAnother(s, P);
+ assert AllP(Filter(s, P), P);
+ Lemma_InAllP(x, Filter(s, P), P);
}
-lemma FS_Pong<T>(s: Stream<T>, h: PredicateHandle, x: T, k: nat)
- requires AlwaysAnother(s, h) && In(x, s) && P(x, h);
- requires Tail(s, k).head == x;
- ensures In(x, Filter(s, h));
- decreases k;
+lemma FS_Pong<T>(s: Stream<T>, P: Predicate, x: T, k: nat)
+ requires Total(P)
+ requires AlwaysAnother(s, P) && In(x, s) && P(x)
+ requires Tail(s, k).head == x
+ ensures In(x, Filter(s, P))
+ decreases k
{
- var fs := Filter(s, h);
+ var fs := Filter(s, P);
if s.head == x {
assert Tail(fs, 0) == fs;
- } else if P(s.head, h) {
- assert fs == Cons(s.head, Filter(s.tail, h)); // reminder of where we are
+ } else if P(s.head) {
+ assert fs == Cons(s.head, Filter(s.tail, P)); // reminder of where we are
calc {
true;
- == { FS_Pong(s.tail, h, x, k-1); }
- In(x, Filter(s.tail, h));
+ //== { FS_Pong(s.tail, P, x, k-1); }
+ In(x, Filter(s.tail, P));
==> { assert fs.head != x; Lemma_InTail(x, fs); }
In(x, fs);
}
} else {
- assert fs == Filter(s.tail, h); // reminder of where we are
- FS_Pong(s.tail, h, x, k-1);
+ //assert fs == Filter(s.tail, P); // reminder of where we are
+ //FS_Pong(s.tail, P, x, k-1);
}
}
// ----- orderings ------
-function Ord<T>(x: T, ord: PredicateHandle): int
+type Ord<T> = T -> int
-copredicate Increasing(s: Stream, ord: PredicateHandle)
+copredicate Increasing(s: Stream, ord: Ord)
+ requires Total(ord)
{
- Ord(s.head, ord) < Ord(s.tail.head, ord) && Increasing(s.tail, ord)
+ ord(s.head) < ord(s.tail.head) && Increasing(s.tail, ord)
}
-copredicate IncrFrom(s: Stream, low: int, ord: PredicateHandle)
+copredicate IncrFrom(s: Stream, low: int, ord: Ord)
+ requires Total(ord)
{
- low <= Ord(s.head, ord) && IncrFrom(s.tail, Ord(s.head, ord) + 1, ord)
+ low <= ord(s.head) && IncrFrom(s.tail, ord(s.head) + 1, ord)
}
-colemma Lemma_Incr0(s: Stream, low: int, ord: PredicateHandle)
- requires IncrFrom(s, low, ord);
- ensures Increasing(s, ord);
+colemma Lemma_Incr0(s: Stream, low: int, ord: Ord)
+ requires Total(ord)
+ requires IncrFrom(s, low, ord)
+ ensures Increasing(s, ord)
{
}
-colemma Lemma_Incr1(s: Stream, ord: PredicateHandle)
+colemma Lemma_Incr1(s: Stream, ord: Ord)
+ requires Total(ord)
requires Increasing(s, ord);
- ensures IncrFrom(s, Ord(s.head, ord), ord);
+ ensures IncrFrom(s, ord(s.head), ord);
{
Lemma_Incr1(s.tail, ord);
}
-lemma Theorem_FilterPreservesOrdering(s: Stream, h: PredicateHandle, ord: PredicateHandle)
- requires Increasing(s, ord) && AlwaysAnother(s, h);
- ensures Increasing(Filter(s, h), ord);
+lemma Theorem_FilterPreservesOrdering(s: Stream, P: Predicate, ord: Ord)
+ requires Total(P) && Total(ord)
+ requires Increasing(s, ord) && AlwaysAnother(s, P)
+ ensures Increasing(Filter(s, P), ord)
{
Lemma_Incr1(s, ord);
- Lemma_FilterPreservesIncrFrom(s, h, Ord(s.head, ord), ord);
- Lemma_Incr0(Filter(s, h), Ord(s.head, ord), ord);
+ Lemma_FilterPreservesIncrFrom(s, P, ord(s.head), ord);
+ Lemma_Incr0(Filter(s, P), ord(s.head), ord);
}
-colemma Lemma_FilterPreservesIncrFrom(s: Stream, h: PredicateHandle, low: int, ord: PredicateHandle)
- requires IncrFrom(s, low, ord) && AlwaysAnother(s, h) && low <= Ord(s.head, ord);
- ensures IncrFrom(Filter(s, h), low, ord);
- decreases Next(s, h);
+colemma Lemma_FilterPreservesIncrFrom(s: Stream, P: Predicate, low: int, ord: Ord)
+ requires Total(P) && Total(ord)
+ requires IncrFrom(s, low, ord) && AlwaysAnother(s, P) && low <= ord(s.head)
+ ensures IncrFrom(Filter(s, P), low, ord)
+ decreases Next(s, P)
{
- if P(s.head, h) {
- Lemma_FilterPreservesIncrFrom(s.tail, h, Ord(s.head, ord)+1, ord);
+ if P(s.head) {
+ Lemma_FilterPreservesIncrFrom(s.tail, P, ord(s.head)+1, ord);
} else {
- Lemma_FilterPreservesIncrFrom#[_k](s.tail, h, low, ord);
+ Lemma_FilterPreservesIncrFrom#[_k](s.tail, P, low, ord);
}
}
diff --git a/Test/dafny3/Filter.dfy.expect b/Test/dafny3/Filter.dfy.expect
index 91aa9b47..6ba9b9bc 100644
--- a/Test/dafny3/Filter.dfy.expect
+++ b/Test/dafny3/Filter.dfy.expect
@@ -1,2 +1,2 @@
-Dafny program verifier finished with 43 verified, 0 errors
+Dafny program verifier finished with 42 verified, 0 errors
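The Filter.dfy rewrite drops the opaque PredicateHandle encoding (an uninterpreted P(x, h)) in favor of first-class arrow types: predicates are plain values of type T -> bool, and Total pins them to heap-independent, everywhere-defined functions so they can appear inside copredicates and decreases clauses. A hedged usage sketch against the new signatures (hypothetical lemma name; the definitions and the colemma it calls are the ones from the test above):

lemma FilteredStreamSatisfiesP(s: Stream<int>, P: int -> bool)
  requires Total(P) && AlwaysAnother(s, P)
  ensures AllP(Filter(s, P), P)
{
  // the coinductive fact is supplied by the colemma proved in Filter.dfy
  Filter_AlwaysAnother(s, P);
}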
diff --git a/Test/dafny3/GenericSort.dfy b/Test/dafny3/GenericSort.dfy
index 7ea038be..6bd06965 100644
--- a/Test/dafny3/GenericSort.dfy
+++ b/Test/dafny3/GenericSort.dfy
@@ -7,25 +7,25 @@ abstract module TotalOrder {
// Three properties of total orders. Here, they are given as unproved
// lemmas, that is, as axioms.
lemma Antisymmetry(a: T, b: T)
- requires Leq(a, b) && Leq(b, a);
- ensures a == b;
+ requires Leq(a, b) && Leq(b, a)
+ ensures a == b
lemma Transitivity(a: T, b: T, c: T)
- requires Leq(a, b) && Leq(b, c);
- ensures Leq(a, c);
+ requires Leq(a, b) && Leq(b, c)
+ ensures Leq(a, c)
lemma Totality(a: T, b: T)
- ensures Leq(a, b) || Leq(b, a);
+ ensures Leq(a, b) || Leq(b, a)
}
abstract module Sort {
import O as TotalOrder // let O denote some module that has the members of TotalOrder
predicate Sorted(a: array<O.T>, low: int, high: int)
- requires a != null && 0 <= low <= high <= a.Length;
- reads a;
+ requires a != null && 0 <= low <= high <= a.Length
+ reads a
// The body of the predicate is hidden outside the module, but the postcondition is
// "exported" (that is, visible, known) outside the module. Here, we choose the
// export the following property:
- ensures Sorted(a, low, high) ==> forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j]);
+ ensures Sorted(a, low, high) ==> forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j])
{
forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j])
}
@@ -33,18 +33,18 @@ abstract module Sort {
// In the insertion sort routine below, it's more convenient to keep track of only that
// neighboring elements of the array are sorted...
predicate NeighborSorted(a: array<O.T>, low: int, high: int)
- requires a != null && 0 <= low <= high <= a.Length;
- reads a;
+ requires a != null && 0 <= low <= high <= a.Length
+ reads a
{
forall i :: low < i < high ==> O.Leq(a[i-1], a[i])
}
// ...but we show that property to imply all pairs to be sorted. The proof of this
// lemma uses the transitivity property.
lemma NeighborSorted_implies_Sorted(a: array<O.T>, low: int, high: int)
- requires a != null && 0 <= low <= high <= a.Length;
- requires NeighborSorted(a, low, high);
- ensures Sorted(a, low, high);
- decreases high - low;
+ requires a != null && 0 <= low <= high <= a.Length
+ requires NeighborSorted(a, low, high)
+ ensures Sorted(a, low, high)
+ decreases high - low
{
if low != high {
NeighborSorted_implies_Sorted(a, low+1, high);
@@ -57,25 +57,25 @@ abstract module Sort {
// Standard insertion sort method
method InsertionSort(a: array<O.T>)
- requires a != null;
- modifies a;
- ensures Sorted(a, 0, a.Length);
- ensures multiset(a[..]) == old(multiset(a[..]));
+ requires a != null
+ modifies a
+ ensures Sorted(a, 0, a.Length)
+ ensures multiset(a[..]) == old(multiset(a[..]))
{
if a.Length == 0 { return; }
var i := 1;
while i < a.Length
- invariant 0 < i <= a.Length;
- invariant NeighborSorted(a, 0, i);
- invariant multiset(a[..]) == old(multiset(a[..]));
+ invariant 0 < i <= a.Length
+ invariant NeighborSorted(a, 0, i)
+ invariant multiset(a[..]) == old(multiset(a[..]))
{
var j := i;
while 0 < j && !O.Leq(a[j-1], a[j])
- invariant 0 <= j <= i;
- invariant NeighborSorted(a, 0, j);
- invariant NeighborSorted(a, j, i+1);
- invariant 0 < j < i ==> O.Leq(a[j-1], a[j+1]);
- invariant multiset(a[..]) == old(multiset(a[..]));
+ invariant 0 <= j <= i
+ invariant NeighborSorted(a, 0, j)
+ invariant NeighborSorted(a, j, i+1)
+ invariant 0 < j < i ==> O.Leq(a[j-1], a[j+1])
+ invariant multiset(a[..]) == old(multiset(a[..]))
{
// The proof of correctness uses the totality property here.
// It implies that if two elements are not previously in
@@ -97,7 +97,7 @@ module IntOrder refines TotalOrder {
datatype T = Int(i: int)
// Define the ordering on these integers
predicate method Leq ...
- ensures Leq(a, b) ==> a.i <= b.i;
+ ensures Leq(a, b) ==> a.i <= b.i
{
a.i <= b.i
}
@@ -156,7 +156,7 @@ module intOrder refines TotalOrder {
type T = int
// Define the ordering on these integers
predicate method Leq ...
- ensures Leq(a, b) ==> a <= b;
+ ensures Leq(a, b) ==> a <= b
{
a <= b
}
diff --git a/Test/dafny3/InfiniteTrees.dfy b/Test/dafny3/InfiniteTrees.dfy
index 516a9e4e..85f73bc3 100644
--- a/Test/dafny3/InfiniteTrees.dfy
+++ b/Test/dafny3/InfiniteTrees.dfy
@@ -286,7 +286,7 @@ lemma Theorem1_Lemma(t: Tree, n: nat, p: Stream<int>)
LowerThan(ch, n);
==> // def. LowerThan
LowerThan(ch.head.children, n-1);
- ==> { Theorem1_Lemma(ch.head, n-1, tail); }
+ ==> //{ Theorem1_Lemma(ch.head, n-1, tail); }
!IsNeverEndingStream(tail);
==> // def. IsNeverEndingStream
!IsNeverEndingStream(p);
@@ -374,30 +374,30 @@ lemma Proposition3b()
}
}
lemma Proposition3b_Lemma(t: Tree, h: nat, p: Stream<int>)
- requires LowerThan(t.children, h) && ValidPath(t, p);
- ensures !IsNeverEndingStream(p);
- decreases h;
+ requires LowerThan(t.children, h) && ValidPath(t, p)
+ ensures !IsNeverEndingStream(p)
+ decreases h
{
match p {
case Nil =>
case Cons(index, tail) =>
// From the definition of ValidPath(t, p), we get the following:
var ch := Tail(t.children, index);
- assert ch.Cons? && ValidPath(ch.head, tail);
+ // assert ch.Cons? && ValidPath(ch.head, tail);
// From the definition of LowerThan(t.children, h), we get the following:
match t.children {
case Nil =>
ValidPath_Lemma(p);
assert false; // absurd case
case Cons(_, _) =>
- assert 1 <= h;
+ // assert 1 <= h;
LowerThan_Lemma(t.children, index, h);
- assert LowerThan(ch, h);
+ // assert LowerThan(ch, h);
}
// Putting these together, by ch.Cons? and the definition of LowerThan(ch, h), we get:
- assert LowerThan(ch.head.children, h-1);
+ // assert LowerThan(ch.head.children, h-1);
// And now we can invoke the induction hypothesis:
- Proposition3b_Lemma(ch.head, h-1, tail);
+ // Proposition3b_Lemma(ch.head, h-1, tail);
}
}
@@ -627,30 +627,10 @@ colemma Path_Lemma2'(p: Stream<int>)
}
}
colemma Path_Lemma2''(p: Stream<int>, n: nat, tail: Stream<int>)
- requires IsNeverEndingStream(p) && p.tail == tail;
- ensures InfinitePath'(S2N'(n, tail));
+ requires IsNeverEndingStream(p) && p.tail == tail
+ ensures InfinitePath'(S2N'(n, tail))
{
- if n <= 0 {
- calc {
- InfinitePath'#[_k](S2N'(n, tail));
- // def. S2N'
- InfinitePath'#[_k](Zero(S2N(tail)));
- // def. InfinitePath'
- InfinitePath#[_k-1](S2N(tail));
- { Path_Lemma2'(tail); }
- true;
- }
- } else {
- calc {
- InfinitePath'#[_k](S2N'(n, tail));
- // def. S2N'
- InfinitePath'#[_k](Succ(S2N'(n-1, tail)));
- // def. InfinitePath'
- InfinitePath'#[_k-1](S2N'(n-1, tail));
- { Path_Lemma2''(p, n-1, tail); }
- true;
- }
- }
+ Path_Lemma2'(tail);
}
lemma Path_Lemma3(r: CoOption<Number>)
ensures InfinitePath(r) ==> IsNeverEndingStream(N2S(r));
diff --git a/Test/dafny4/ACL2-extractor.dfy b/Test/dafny4/ACL2-extractor.dfy
index 8fe98531..bd2c9d83 100644
--- a/Test/dafny4/ACL2-extractor.dfy
+++ b/Test/dafny4/ACL2-extractor.dfy
@@ -116,9 +116,9 @@ lemma RevLength(xs: List)
// you can prove two lists equal by proving their elements equal
lemma EqualElementsMakeEqualLists(xs: List, ys: List)
- requires length(xs) == length(ys);
- requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys);
- ensures xs == ys;
+ requires length(xs) == length(ys)
+ requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys)
+ ensures xs == ys
{
match xs {
case Nil =>
@@ -132,7 +132,7 @@ lemma EqualElementsMakeEqualLists(xs: List, ys: List)
nth(i+1, xs) == nth(i+1, ys);
}
}
- EqualElementsMakeEqualLists(xs.tail, ys.tail);
+ // EqualElementsMakeEqualLists(xs.tail, ys.tail);
}
}
diff --git a/Test/dafny4/KozenSilva.dfy b/Test/dafny4/KozenSilva.dfy
index af0cdc71..ef49b10f 100644
--- a/Test/dafny4/KozenSilva.dfy
+++ b/Test/dafny4/KozenSilva.dfy
@@ -25,8 +25,8 @@ copredicate LexLess(s: Stream<int>, t: Stream<int>)
// A co-lemma is used to establish the truth of a co-predicate.
colemma Theorem1_LexLess_Is_Transitive(s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires LexLess(s, t) && LexLess(t, u);
- ensures LexLess(s, u);
+ requires LexLess(s, t) && LexLess(t, u)
+ ensures LexLess(s, u)
{
// Here is the proof, which is actually a body of code. It lends itself to a
// simple, intuitive co-inductive reading. For a theorem this simple, this simple
@@ -38,6 +38,14 @@ colemma Theorem1_LexLess_Is_Transitive(s: Stream<int>, t: Stream<int>, u: Stream
}
}
+// Actually, Dafny can do the proof of the previous lemma completely automatically. Here it is:
+colemma Theorem1_LexLess_Is_Transitive_Automatic(s: Stream<int>, t: Stream<int>, u: Stream<int>)
+ requires LexLess(s, t) && LexLess(t, u)
+ ensures LexLess(s, u)
+{
+ // no manual proof needed, so the body of the co-lemma is empty
+}
+
// The following predicate captures the (inductively defined) negation of (the
// co-inductively defined) LexLess above.
predicate NotLexLess(s: Stream<int>, t: Stream<int>)
@@ -51,7 +59,7 @@ predicate NotLexLess'(k: nat, s: Stream<int>, t: Stream<int>)
}
lemma EquivalenceTheorem(s: Stream<int>, t: Stream<int>)
- ensures LexLess(s, t) <==> !NotLexLess(s, t);
+ ensures LexLess(s, t) <==> !NotLexLess(s, t)
{
if !NotLexLess(s, t) {
EquivalenceTheorem0(s, t);
@@ -61,8 +69,8 @@ lemma EquivalenceTheorem(s: Stream<int>, t: Stream<int>)
}
}
colemma EquivalenceTheorem0(s: Stream<int>, t: Stream<int>)
- requires !NotLexLess(s, t);
- ensures LexLess(s, t);
+ requires !NotLexLess(s, t)
+ ensures LexLess(s, t)
{
// Here, more needs to be said about the way Dafny handles co-lemmas.
// The way a co-lemma establishes a co-predicate is to prove, by induction,
@@ -74,14 +82,14 @@ colemma EquivalenceTheorem0(s: Stream<int>, t: Stream<int>)
// indicates a finite unrolling of a co-inductive predicate. In particular,
// LexLess#[k] refers to k unrollings of LexLess.
lemma EquivalenceTheorem0_Lemma(k: nat, s: Stream<int>, t: Stream<int>)
- requires !NotLexLess'(k, s, t);
- ensures LexLess#[k](s, t);
+ requires !NotLexLess'(k, s, t)
+ ensures LexLess#[k](s, t)
{
// This simple inductive proof is done completely automatically by Dafny.
}
lemma EquivalenceTheorem1(s: Stream<int>, t: Stream<int>)
- requires LexLess(s, t);
- ensures !NotLexLess(s, t);
+ requires LexLess(s, t)
+ ensures !NotLexLess(s, t)
{
// The forall statement in Dafny is used, here, as universal introduction:
// what EquivalenceTheorem1_Lemma establishes for one k, the forall
@@ -91,22 +99,22 @@ lemma EquivalenceTheorem1(s: Stream<int>, t: Stream<int>)
}
}
lemma EquivalenceTheorem1_Lemma(k: nat, s: Stream<int>, t: Stream<int>)
- requires LexLess(s, t);
- ensures !NotLexLess'(k, s, t);
+ requires LexLess(s, t)
+ ensures !NotLexLess'(k, s, t)
{
}
lemma Theorem1_Alt(s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires NotLexLess(s, u);
- ensures NotLexLess(s, t) || NotLexLess(t, u);
+ requires NotLexLess(s, u)
+ ensures NotLexLess(s, t) || NotLexLess(t, u)
{
forall k: nat | NotLexLess'(k, s, u) {
Theorem1_Alt_Lemma(k, s, t, u);
}
}
lemma Theorem1_Alt_Lemma(k: nat, s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires NotLexLess'(k, s, u);
- ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u);
+ requires NotLexLess'(k, s, u)
+ ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u)
{
}
@@ -116,8 +124,8 @@ function PointwiseAdd(s: Stream<int>, t: Stream<int>): Stream<int>
}
colemma Theorem2_Pointwise_Addition_Is_Monotone(s: Stream<int>, t: Stream<int>, u: Stream<int>, v: Stream<int>)
- requires LexLess(s, t) && LexLess(u, v);
- ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v));
+ requires LexLess(s, t) && LexLess(u, v)
+ ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v))
{
// The co-lemma will establish the co-inductive predicate by establishing
// all finite unrollings thereof. Each finite unrolling is proved by
@@ -148,15 +156,9 @@ copredicate Subtype(a: RecType, b: RecType)
}
colemma Theorem3_Subtype_Is_Transitive(a: RecType, b: RecType, c: RecType)
- requires Subtype(a, b) && Subtype(b, c);
- ensures Subtype(a, c);
-{
- if a == Bottom || c == Top {
- // done
- } else {
- Theorem3_Subtype_Is_Transitive(c.dom, b.dom, a.dom);
- Theorem3_Subtype_Is_Transitive(a.ran, b.ran, c.ran);
- }
+ requires Subtype(a, b) && Subtype(b, c)
+ ensures Subtype(a, c)
+{
}
// --------------------------------------------------------------------------
@@ -184,16 +186,16 @@ copredicate ValBelow(u: Val, v: Val)
}
colemma Theorem4a_ClEnvBelow_Is_Transitive(c: ClEnv, d: ClEnv, e: ClEnv)
- requires ClEnvBelow(c, d) && ClEnvBelow(d, e);
- ensures ClEnvBelow(c, e);
+ requires ClEnvBelow(c, d) && ClEnvBelow(d, e)
+ ensures ClEnvBelow(c, e)
{
forall y | y in c.m {
Theorem4b_ValBelow_Is_Transitive#[_k-1](c.m[y], d.m[y], e.m[y]);
}
}
colemma Theorem4b_ValBelow_Is_Transitive(u: Val, v: Val, w: Val)
- requires ValBelow(u, v) && ValBelow(v, w);
- ensures ValBelow(u, w);
+ requires ValBelow(u, v) && ValBelow(v, w)
+ ensures ValBelow(u, w)
{
if u.ValCl? {
Theorem4a_ClEnvBelow_Is_Transitive(u.cl.env, v.cl.env, w.cl.env);
@@ -209,7 +211,7 @@ predicate IsCapsule(cap: Capsule)
}
function ClosureConversion(cap: Capsule): Cl
- requires IsCapsule(cap);
+ requires IsCapsule(cap)
{
Closure(cap.e.abs, ClosureConvertedMap(cap.s))
// In the Kozen and Silva paper, there are more conditions, having to do with free variables,
@@ -229,8 +231,8 @@ predicate CapsuleEnvironmentBelow(s: map<Var, ConstOrAbs>, t: map<Var, ConstOrAb
}
colemma Theorem5_ClosureConversion_Is_Monotone(s: map<Var, ConstOrAbs>, t: map<Var, ConstOrAbs>)
- requires CapsuleEnvironmentBelow(s, t);
- ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t));
+ requires CapsuleEnvironmentBelow(s, t)
+ ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t))
{
}
@@ -251,8 +253,8 @@ copredicate Bisim(s: Stream, t: Stream)
}
colemma Theorem6_Bisim_Is_Symmetric(s: Stream, t: Stream)
- requires Bisim(s, t);
- ensures Bisim(t, s);
+ requires Bisim(s, t)
+ ensures Bisim(t, s)
{
// proof is automatic
}
@@ -270,8 +272,8 @@ function merge(s: Stream, t: Stream): Stream
// In general, the termination argument needs to be supplied explicitly in terms
// of a metric, rank, variant function, or whatever you want to call it--a
// "decreases" clause in Dafny. Dafny provides some help in making up "decreases"
-// clauses, and in this case it automatically adds "decreases 0;" to SplitLeft
-// and "decreases 1;" to SplitRight. With these "decreases" clauses, the
+// clauses, and in this case it automatically adds "decreases 0" to SplitLeft
+// and "decreases 1" to SplitRight. With these "decreases" clauses, the
// termination check of SplitRight's call to SplitLeft will simply be "0 < 1",
// which is trivial to check.
function SplitLeft(s: Stream): Stream
@@ -284,7 +286,7 @@ function SplitRight(s: Stream): Stream
}
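// Illustrative sketch (not part of KozenSilva.dfy): the automatically chosen
// "decreases 0" and "decreases 1" written out explicitly for a pair of made-up
// mutually recursive functions, Ping and Pong. The call from Pong to Ping is
// justified because (n, 1) lexicographically exceeds (n, 0); the call from Ping
// to Pong is justified because n - 1 < n.
function Ping(n: nat): nat
  decreases n, 0
{
  if n == 0 then 0 else Pong(n - 1)
}
function Pong(n: nat): nat
  decreases n, 1
{
  Ping(n)
}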
colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream)
- ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s);
+ ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s)
{
var LHS := merge(SplitLeft(s), SplitRight(s));
// The construct that follows is a "calc" statement. It gives a way to write an
@@ -318,7 +320,7 @@ colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream)
}
colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Equal(s: Stream)
- ensures merge(SplitLeft(s), SplitRight(s)) == s;
+ ensures merge(SplitLeft(s), SplitRight(s)) == s
{
// The proof of this co-lemma is actually done completely automatically (so the
// body of this co-lemma can be empty). However, just to show what the calculations
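// Illustrative sketch (not part of KozenSilva.dfy): the general shape of a calc
// statement, proving a small linear-arithmetic fact; the lemma name CalcShape
// is made up. Each line is an expression, the == between lines states the
// claimed relation, and an optional { ... } block can supply a hint.
lemma CalcShape(x: int)
  ensures 2 * (x + 3) == x + x + 6
{
  calc {
    2 * (x + 3);
  ==  // distribute the multiplication
    2 * x + 6;
  ==  // rewrite 2 * x as x + x
    x + x + 6;
  }
}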
diff --git a/Test/dafny4/KozenSilva.dfy.expect b/Test/dafny4/KozenSilva.dfy.expect
index c6c90498..90432af3 100644
--- a/Test/dafny4/KozenSilva.dfy.expect
+++ b/Test/dafny4/KozenSilva.dfy.expect
@@ -1,2 +1,2 @@
-Dafny program verifier finished with 47 verified, 0 errors
+Dafny program verifier finished with 49 verified, 0 errors
diff --git a/Test/dafny4/NipkowKlein-chapter7.dfy b/Test/dafny4/NipkowKlein-chapter7.dfy
index 7db31cbd..e694fc4b 100644
--- a/Test/dafny4/NipkowKlein-chapter7.dfy
+++ b/Test/dafny4/NipkowKlein-chapter7.dfy
@@ -112,13 +112,6 @@ inductive lemma lemma_7_6(b: bexp, c: com, c': com, s: state, t: state)
requires big_step(While(b, c), s, t) && equiv_c(c, c')
ensures big_step(While(b, c'), s, t)
{
- if !bval(b, s) {
- // trivial
- } else {
- var s' :| big_step#[_k-1](c, s, s') && big_step#[_k-1](While(b, c), s', t);
- lemma_7_6(b, c, c', s', t); // induction hypothesis
- assert big_step(While(b, c'), s', t);
- }
}
// equiv_c is an equivalence relation
@@ -139,27 +132,7 @@ inductive lemma IMP_is_deterministic(c: com, s: state, t: state, t': state)
requires big_step(c, s, t) && big_step(c, s, t')
ensures t == t'
{
- match c
- case SKIP =>
- // trivial
- case Assign(x, a) =>
- // trivial
- case Seq(c0, c1) =>
- var s' :| big_step#[_k-1](c0, s, s') && big_step#[_k-1](c1, s', t);
- var s'' :| big_step#[_k-1](c0, s, s'') && big_step#[_k-1](c1, s'', t');
- IMP_is_deterministic(c0, s, s', s'');
- IMP_is_deterministic(c1, s', t, t');
- case If(b, thn, els) =>
- IMP_is_deterministic(if bval(b, s) then thn else els, s, t, t');
- case While(b, body) =>
- if !bval(b, s) {
- // trivial
- } else {
- var s' :| big_step#[_k-1](body, s, s') && big_step#[_k-1](While(b, body), s', t);
- var s'' :| big_step#[_k-1](body, s, s'') && big_step#[_k-1](While(b, body), s'', t');
- IMP_is_deterministic(body, s, s', s'');
- IMP_is_deterministic(While(b, body), s', t, t');
- }
+ // Dafny totally rocks!
}
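// Illustrative sketch (not from NipkowKlein-chapter7.dfy): the kind of goal
// that Dafny's automatic induction over the implicit _k is expected to
// discharge for inductive lemmas; the predicate even and the lemma below are
// made-up names.
inductive predicate even(n: nat)
{
  n == 0 || (2 <= n && even(n - 2))
}
inductive lemma EvenIsDivisibleByTwo(n: nat)
  requires even(n)
  ensures n % 2 == 0
{
  // no manual case split or recursive call should be needed here
}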
// ----- Small-step semantics -----
@@ -214,12 +187,6 @@ inductive lemma star_transitive_aux(c0: com, s0: state, c1: com, s1: state, c2:
requires small_step_star(c0, s0, c1, s1)
ensures small_step_star(c1, s1, c2, s2) ==> small_step_star(c0, s0, c2, s2)
{
- if c0 == c1 && s0 == s1 {
- } else {
- var c', s' :|
- small_step(c0, s0, c', s') && small_step_star#[_k-1](c', s', c1, s1);
- star_transitive_aux(c', s', c1, s1, c2, s2);
- }
}
// The big-step semantics can be simulated by some number of small steps
@@ -240,15 +207,14 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
small_step_star(Seq(c0, c1), s, Seq(SKIP, c1), s') && small_step_star(Seq(SKIP, c1), s', SKIP, t);
{ lemma_7_13(c0, s, SKIP, s', c1); }
small_step_star(c0, s, SKIP, s') && small_step_star(Seq(SKIP, c1), s', SKIP, t);
- { BigStep_implies_SmallStepStar(c0, s, s'); }
+ //{ BigStep_implies_SmallStepStar(c0, s, s'); }
small_step_star(Seq(SKIP, c1), s', SKIP, t);
{ assert small_step(Seq(SKIP, c1), s', c1, s'); }
small_step_star(c1, s', SKIP, t);
- { BigStep_implies_SmallStepStar(c1, s', t); }
+ //{ BigStep_implies_SmallStepStar(c1, s', t); }
true;
}
case If(b, thn, els) =>
- BigStep_implies_SmallStepStar(if bval(b, s) then thn else els, s, t);
case While(b, body) =>
if !bval(b, s) && s == t {
calc <== {
@@ -271,11 +237,11 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
small_step_star(Seq(body, While(b, body)), s, Seq(SKIP, While(b, body)), s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
{ lemma_7_13(body, s, SKIP, s', While(b, body)); }
small_step_star(body, s, SKIP, s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
- { BigStep_implies_SmallStepStar(body, s, s'); }
+ //{ BigStep_implies_SmallStepStar(body, s, s'); }
small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
{ assert small_step(Seq(SKIP, While(b, body)), s', While(b, body), s'); }
small_step_star(While(b, body), s', SKIP, t);
- { BigStep_implies_SmallStepStar(While(b, body), s', t); }
+ //{ BigStep_implies_SmallStepStar(While(b, body), s', t); }
true;
}
}
@@ -299,7 +265,6 @@ inductive lemma SmallStepStar_implies_BigStep(c: com, s: state, t: state)
if c == SKIP && s == t {
} else {
var c', s' :| small_step(c, s, c', s') && small_step_star#[_k-1](c', s', SKIP, t);
- SmallStepStar_implies_BigStep(c', s', t);
SmallStep_plus_BigStep(c, s, c', s', t);
}
}
@@ -316,7 +281,6 @@ inductive lemma SmallStep_plus_BigStep(c: com, s: state, c': com, s': state, t:
var c0' :| c' == Seq(c0', c1) && small_step(c0, s, c0', s');
if big_step(c', s', t) {
var s'' :| big_step(c0', s', s'') && big_step(c1, s'', t);
- SmallStep_plus_BigStep(c0, s, c0', s', s'');
}
}
case If(b, thn, els) =>
diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy
index 3dba6325..0d6cffa1 100644
--- a/Test/dafny4/NumberRepresentations.dfy
+++ b/Test/dafny4/NumberRepresentations.dfy
@@ -234,17 +234,16 @@ lemma UniqueRepresentation(a: seq<int>, b: seq<int>, lowDigit: int, base: nat)
}
lemma ZeroIsUnique(a: seq<int>, lowDigit: int, base: nat)
- requires 2 <= base && lowDigit <= 0 < lowDigit + base;
- requires a == trim(a);
- requires IsSkewNumber(a, lowDigit, base);
- requires eval(a, base) == 0;
- ensures a == [];
+ requires 2 <= base && lowDigit <= 0 < lowDigit + base
+ requires a == trim(a)
+ requires IsSkewNumber(a, lowDigit, base)
+ requires eval(a, base) == 0
+ ensures a == []
{
if a != [] {
- assert eval(a, base) == a[0] + base * eval(a[1..], base);
if eval(a[1..], base) == 0 {
TrimProperty(a);
- ZeroIsUnique(a[1..], lowDigit, base);
+ // ZeroIsUnique(a[1..], lowDigit, base);
}
assert false;
}
diff --git a/Test/vstte2012/Tree.dfy b/Test/vstte2012/Tree.dfy
index a346aac5..662024e4 100644
--- a/Test/vstte2012/Tree.dfy
+++ b/Test/vstte2012/Tree.dfy
@@ -22,9 +22,9 @@ datatype Result = Fail | Res(t: Tree, sOut: seq<int>)
// The postconditions state properties that are needed
// in the completeness proof.
function toList(d: int, t: Tree): seq<int>
- ensures toList(d, t) != [] && toList(d, t)[0] >= d;
- ensures (toList(d, t)[0] == d) == (t == Leaf);
- decreases t;
+ ensures toList(d, t) != [] && toList(d, t)[0] >= d
+ ensures (toList(d, t)[0] == d) == (t == Leaf)
+ decreases t
{
match t
case Leaf => [d]
@@ -43,10 +43,10 @@ function toList(d: int, t: Tree): seq<int>
function method build_rec(d: int, s: seq<int>): Result
ensures build_rec(d, s).Res? ==>
|build_rec(d, s).sOut| < |s| &&
- build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..];
+ build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..]
ensures build_rec(d, s).Res? ==>
- toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|];
- decreases |s|, (if s==[] then 0 else s[0]-d);
+ toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|]
+ decreases |s|, (if s==[] then 0 else s[0]-d)
{
if s==[] || s[0] < d then
Fail
@@ -68,7 +68,7 @@ function method build_rec(d: int, s: seq<int>): Result
// sequence yields exactly the input sequence.
// Completeness is proved as a lemma, see below.
function method build(s: seq<int>): Result
- ensures build(s).Res? ==> toList(0,build(s).t) == s;
+ ensures build(s).Res? ==> toList(0,build(s).t) == s
{
var r := build_rec(0, s);
if r.Res? && r.sOut == [] then r else Fail
@@ -86,16 +86,14 @@ function method build(s: seq<int>): Result
// correctly (encoded by calls to this lemma).
lemma lemma0(t: Tree, d: int, s: seq<int>)
ensures build_rec(d, toList(d, t) + s).Res? &&
- build_rec(d, toList(d, t) + s).sOut == s;
+ build_rec(d, toList(d, t) + s).sOut == s
{
match(t) {
case Leaf =>
assert toList(d, t) == [d];
case Node(l, r) =>
assert toList(d, t) + s == toList(d+1, l) + (toList(d+1, r) + s);
-
- lemma0(l, d+1, toList(d+1, r) + s); // apply the induction hypothesis
- lemma0(r, d+1, s); // apply the induction hypothesis
+ // the rest follows from (two invocations of) the (automatically applied) induction hypothesis
}
}
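// Illustrative sketch (not from Tree.dfy): the same automatically applied
// induction hypothesis at work on a simpler datatype lemma; List, Length,
// Append, and LengthAppend are made-up names.
datatype List = Nil | Cons(head: int, tail: List)
function Length(l: List): nat
{
  match l
  case Nil => 0
  case Cons(h, t) => 1 + Length(t)
}
function Append(a: List, b: List): List
{
  match a
  case Nil => b
  case Cons(h, t) => Cons(h, Append(t, b))
}
lemma LengthAppend(a: List, b: List)
  ensures Length(Append(a, b)) == Length(a) + Length(b)
{
  // expected to verify with an empty body, by automatic induction on a
}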
@@ -107,8 +105,8 @@ lemma lemma0(t: Tree, d: int, s: seq<int>)
// the following two lemmas replace these arguments
// by quantified variables.
lemma lemma1(t: Tree, s:seq<int>)
- requires s == toList(0, t) + [];
- ensures build(s).Res?;
+ requires s == toList(0, t) + []
+ ensures build(s).Res?
{
lemma0(t, 0, []);
}
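// Illustrative sketch (not from Tree.dfy): the same pattern of discharging an
// existentially quantified antecedent by universal introduction over every
// potential witness; IsDouble, EvenWitness, and ExistsIntro are made-up names.
predicate IsDouble(x: int, n: int)
{
  n == x + x
}
lemma EvenWitness(x: int, n: int)
  requires IsDouble(x, n)
  ensures n % 2 == 0
{
}
lemma ExistsIntro(n: int)
  ensures (exists x :: IsDouble(x, n)) ==> n % 2 == 0
{
  forall x: int | IsDouble(x, n)
    ensures n % 2 == 0
  {
    EvenWitness(x, n);
  }
}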
@@ -117,7 +115,7 @@ lemma lemma1(t: Tree, s:seq<int>)
// This lemma introduces the existential quantifier in the completeness
// property.
lemma lemma2(s: seq<int>)
- ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res?;
+ ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res?
{
forall t | toList(0,t) == s {
lemma1(t, s);
@@ -131,7 +129,7 @@ lemma lemma2(s: seq<int>)
// The body of the method converts the argument of lemma2
// into a universally quantified variable.
lemma completeness()
- ensures forall s: seq<int> :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?);
+ ensures forall s: seq<int> :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?)
{
forall s {
lemma2(s);
@@ -145,7 +143,7 @@ lemma completeness()
// unfold the necessary definitions.
method harness0()
ensures build([1,3,3,2]).Res? &&
- build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf));
+ build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf))
{
}
@@ -155,6 +153,6 @@ method harness0()
// assertions are required by the verifier to
// unfold the necessary definitions.
method harness1()
- ensures build([1,3,2,2]).Fail?;
+ ensures build([1,3,2,2]).Fail?
{
}