From 19176cf2d4ae511bcfb32ea2d417dae83861728a Mon Sep 17 00:00:00 2001 From: leino Date: Fri, 12 Jun 2015 15:59:17 -0700 Subject: Added /vcs... cop-out to test case to make it go through --- Test/dafny4/NumberRepresentations.dfy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Test/dafny4/NumberRepresentations.dfy') diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy index 1ebdf64c..8a94505c 100644 --- a/Test/dafny4/NumberRepresentations.dfy +++ b/Test/dafny4/NumberRepresentations.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /vcsMaxKeepGoingSplits:5 /dprint:"%t.dprint" "%s" > "%t" // RUN: %diff "%s.expect" "%t" // We consider a number representation that consists of a sequence of digits. The least -- cgit v1.2.3 From 550caf7bc7e6427f26bfb3d24f224e12c6c1c318 Mon Sep 17 00:00:00 2001 From: leino Date: Wed, 1 Jul 2015 13:29:11 -0700 Subject: Fixed bug in BplImp! Improvements in encoding of reads of function values. --- Binaries/DafnyPrelude.bpl | 8 ++++++++ Source/Dafny/Translator.cs | 16 +++++++--------- Test/VerifyThis2015/Problem3.dfy | 2 +- Test/dafny4/NumberRepresentations.dfy | 2 +- 4 files changed, 17 insertions(+), 11 deletions(-) (limited to 'Test/dafny4/NumberRepresentations.dfy') diff --git a/Binaries/DafnyPrelude.bpl b/Binaries/DafnyPrelude.bpl index dbf9b76c..2ca10f73 100644 --- a/Binaries/DafnyPrelude.bpl +++ b/Binaries/DafnyPrelude.bpl @@ -273,6 +273,8 @@ const unique class._System.set: ClassName; const unique class._System.seq: ClassName; const unique class._System.multiset: ClassName; +function Tclass._System.object(): Ty; + function /*{:never_pattern true}*/ dtype(ref): Ty; // changed from ClassName to Ty function TypeTuple(a: ClassName, b: ClassName): ClassName; @@ -287,6 +289,12 @@ axiom (forall a: ClassName, b: ClassName :: { TypeTuple(a,b) } type HandleType; +function SetRef_to_SetBox(s: [ref]bool): Set Box; +axiom (forall s: [ref]bool, bx: Box :: { SetRef_to_SetBox(s)[bx] } + SetRef_to_SetBox(s)[bx] == s[$Unbox(bx): ref]); +axiom (forall s: [ref]bool :: { SetRef_to_SetBox(s) } + $Is(SetRef_to_SetBox(s), TSet(Tclass._System.object()))); + // --------------------------------------------------------------- // -- Datatypes -------------------------------------------------- // --------------------------------------------------------------- diff --git a/Source/Dafny/Translator.cs b/Source/Dafny/Translator.cs index c98bd203..78f5c89d 100644 --- a/Source/Dafny/Translator.cs +++ b/Source/Dafny/Translator.cs @@ -5713,11 +5713,9 @@ namespace Microsoft.Dafny { { // forall t1, .., tN+1 : Ty, p: [Heap, Box, ..., Box] Box, heap : Heap, b1, ..., bN : Box - // :: RequriesN(...) ==> ApplyN(t1, .. tN+1, HandleN(h, r, rd), heap, b1, ..., bN) = h[heap, b1, ..., bN] - // - // no precondition for these, but: - // for requires, we add: RequiresN(...) <== r[heap, b1, ..., bN] - // for reads, we could: ReadsN(...)[bx] ==> rd[heap, b1, ..., bN][bx] , but we don't + // :: ApplyN(t1, .. tN+1, HandleN(h, r, rd), heap, b1, ..., bN) == h[heap, b1, ..., bN] + // :: RequiresN(t1, .. tN+1, HandleN(h, r, rd), heap, b1, ..., bN) <== r[heap, b1, ..., bN] + // :: ReadsN(t1, .. 
tN+1, HandleN(h, r, rd), heap, b1, ..., bN) == rd[heap, b1, ..., bN] Action SelectorSemantics = (selector, selectorTy, selectorVar, selectorVarTy, precond, precondTy) => { Contract.Assert((precond == null) == (precondTy == null)); var bvars = new List(); @@ -5919,7 +5917,7 @@ namespace Microsoft.Dafny { var inner_name = GetClass(td).TypedIdent.Name; string name = "T" + inner_name; // Create the type constructor - { + if (td.Name != "object") { // the type constructor for "object" is in DafnyPrelude.bpl Bpl.Variable tyVarOut = BplFormalVar(null, predef.Ty, false); List args = new List( Enumerable.Range(0, arity).Select(i => @@ -11462,10 +11460,10 @@ namespace Microsoft.Dafny { } var rdvars = new List(); - var o = translator.UnboxIfBoxed(BplBoundVar(varNameGen.FreshId("#o#"), predef.BoxType, rdvars), new ObjectType()); - + var o = BplBoundVar(varNameGen.FreshId("#o#"), predef.RefType, rdvars); Bpl.Expr rdbody = new Bpl.LambdaExpr(e.tok, new List(), rdvars, null, translator.InRWClause(e.tok, o, null, e.Reads.ConvertAll(su.SubstFrameExpr), et, null, null)); + rdbody = translator.FunctionCall(e.tok, "SetRef_to_SetBox", predef.SetType(e.tok, true, predef.BoxType), rdbody); return translator.Lit( translator.FunctionCall(e.tok, BuiltinFunction.AtLayer, predef.HandleType, @@ -14220,7 +14218,7 @@ namespace Microsoft.Dafny { Contract.Requires(b != null); Contract.Ensures(Contract.Result() != null); - if (a == Bpl.Expr.True || b == Bpl.Expr.True || b == Bpl.Expr.False) { + if (a == Bpl.Expr.True || b == Bpl.Expr.True) { return b; } else if (a == Bpl.Expr.False) { return Bpl.Expr.True; diff --git a/Test/VerifyThis2015/Problem3.dfy b/Test/VerifyThis2015/Problem3.dfy index 10ad2d3a..4205035d 100644 --- a/Test/VerifyThis2015/Problem3.dfy +++ b/Test/VerifyThis2015/Problem3.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" /vcsMaxKeepGoingSplits:5 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Rustan Leino diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy index 8a94505c..1ebdf64c 100644 --- a/Test/dafny4/NumberRepresentations.dfy +++ b/Test/dafny4/NumberRepresentations.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /vcsMaxKeepGoingSplits:5 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" // RUN: %diff "%s.expect" "%t" // We consider a number representation that consists of a sequence of digits. The least -- cgit v1.2.3 From d9e4d4e4979ecb10f18583f51cd94e71a4dca9a4 Mon Sep 17 00:00:00 2001 From: Clément Pit--Claudel Date: Fri, 17 Jul 2015 11:49:37 -0700 Subject: Enable autoTriggers in LitTriggers and SeqFromArray --- Test/dafny0/LitTriggers.dfy | 6 +++++- Test/dafny0/SeqFromArray.dfy | 8 +++++++- Test/dafny4/NumberRepresentations.dfy | 4 ++++ 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'Test/dafny4/NumberRepresentations.dfy') diff --git a/Test/dafny0/LitTriggers.dfy b/Test/dafny0/LitTriggers.dfy index 14880d68..93e65643 100644 --- a/Test/dafny0/LitTriggers.dfy +++ b/Test/dafny0/LitTriggers.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:1 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Imported from bug 76. LitInt would be triggered on, causing matching failures. 
@@ -33,3 +33,7 @@ lemma L4(x:int, y:int) { assert P(x, y + 1); } + +// Local Variables: +// dafny-prover-local-args: ("/autoTriggers:1") +// End: diff --git a/Test/dafny0/SeqFromArray.dfy b/Test/dafny0/SeqFromArray.dfy index 3a8760ba..629c5045 100644 --- a/Test/dafny0/SeqFromArray.dfy +++ b/Test/dafny0/SeqFromArray.dfy @@ -1,6 +1,8 @@ -// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:1 "%s" > "%t" // RUN: %diff "%s.expect" "%t" +// /autoTriggers:1 added to suppress instabilities + method Main() { } method H(a: array, c: array, n: nat, j: nat) @@ -88,3 +90,7 @@ method M(a: array, c: array, m: nat, n: nat, k: nat, l: nat) assert a[k..k+m] == c[l+k..l+k+m]; } } + +// Local Variables: +// dafny-prover-local-args: ("/autoTriggers:1") +// End: diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy index 8a94505c..f51ae924 100644 --- a/Test/dafny4/NumberRepresentations.dfy +++ b/Test/dafny4/NumberRepresentations.dfy @@ -293,3 +293,7 @@ lemma MulInverse(x: int, a: int, b: int, y: int) ensures a == b; { } + +// Local Variables: +// dafny-prover-local-args: ("/vcsMaxKeepGoingSplits:5") +// End: -- cgit v1.2.3 From f28fa68497352ffb57ab2846da4cc1c1aeaf1968 Mon Sep 17 00:00:00 2001 From: leino Date: Wed, 12 Aug 2015 22:44:50 -0700 Subject: Change the induction heuristic for lemmas to also look in precondition for clues about which parameters to include. As a result, improved many test cases (see, e.g., dafny4/NipkowKlein-chapter7.dfy!) and beautified some others. --- Source/Dafny/Printer.cs | 3 +- Source/Dafny/Rewriter.cs | 23 +++-- Test/VerifyThis2015/Problem1.dfy | 8 -- Test/dafny0/InductivePredicates.dfy | 41 ++++---- Test/dafny0/InductivePredicates.dfy.expect | 6 +- Test/dafny0/Parallel.dfy | 146 ++++++++++++++--------------- Test/dafny3/Filter.dfy | 16 ++-- Test/dafny3/GenericSort.dfy | 56 +++++------ Test/dafny3/InfiniteTrees.dfy | 44 +++------ Test/dafny4/ACL2-extractor.dfy | 8 +- Test/dafny4/KozenSilva.dfy | 80 ++++++++-------- Test/dafny4/KozenSilva.dfy.expect | 2 +- Test/dafny4/NipkowKlein-chapter7.dfy | 46 +-------- Test/dafny4/NumberRepresentations.dfy | 13 ++- Test/vstte2012/Tree.dfy | 32 +++---- 15 files changed, 236 insertions(+), 288 deletions(-) (limited to 'Test/dafny4/NumberRepresentations.dfy') diff --git a/Source/Dafny/Printer.cs b/Source/Dafny/Printer.cs index 1f63d769..6a6a76ba 100644 --- a/Source/Dafny/Printer.cs +++ b/Source/Dafny/Printer.cs @@ -438,6 +438,7 @@ namespace Microsoft.Dafny { public void PrintAttributes(Attributes a) { if (a != null) { PrintAttributes(a.Prev); + wr.Write(" "); PrintOneAttribute(a); } } @@ -445,7 +446,7 @@ namespace Microsoft.Dafny { Contract.Requires(a != null); var name = nameSubstitution ?? a.Name; var usAttribute = name.StartsWith("_"); - wr.Write(" {1}{{:{0}", name, usAttribute ? "/*" : ""); + wr.Write("{1}{{:{0}", name, usAttribute ? 
"/*" : ""); if (a.Args != null) { PrintAttributeArgs(a.Args, false); } diff --git a/Source/Dafny/Rewriter.cs b/Source/Dafny/Rewriter.cs index 4ac709f6..f107a819 100644 --- a/Source/Dafny/Rewriter.cs +++ b/Source/Dafny/Rewriter.cs @@ -1336,25 +1336,25 @@ namespace Microsoft.Dafny } void ComputeLemmaInduction(Method method) { Contract.Requires(method != null); - if (method.IsGhost && method.Mod.Expressions.Count == 0 && method.Outs.Count == 0 && !(method is FixpointLemma)) { - var posts = new List(); - method.Ens.ForEach(mfe => posts.Add(mfe.E)); - ComputeInductionVariables(method.tok, method.Ins, posts, ref method.Attributes); + if (method.Body != null && method.IsGhost && method.Mod.Expressions.Count == 0 && method.Outs.Count == 0 && !(method is FixpointLemma)) { + var specs = new List(); + method.Req.ForEach(mfe => specs.Add(mfe.E)); + method.Ens.ForEach(mfe => specs.Add(mfe.E)); + ComputeInductionVariables(method.tok, method.Ins, specs, method, ref method.Attributes); } } - void ComputeInductionVariables(IToken tok, List boundVars, List searchExprs, ref Attributes attributes) where VarType : class, IVariable { + void ComputeInductionVariables(IToken tok, List boundVars, List searchExprs, Method lemma, ref Attributes attributes) where VarType : class, IVariable { Contract.Requires(tok != null); Contract.Requires(boundVars != null); Contract.Requires(searchExprs != null); Contract.Requires(DafnyOptions.O.Induction != 0); - bool forLemma = boundVars is List; var args = Attributes.FindExpressions(attributes, "induction"); // we only look at the first one we find, since it overrides any other ones if (args == null) { if (DafnyOptions.O.Induction < 2) { // No explicit induction variables and we're asked not to infer anything, so we're done return; - } else if (DafnyOptions.O.Induction == 2 && forLemma) { + } else if (DafnyOptions.O.Induction == 2 && lemma != null) { // We're asked to infer induction variables only for quantifiers, not for lemmas return; } @@ -1384,13 +1384,13 @@ namespace Microsoft.Dafny } if (0 <= boundVars.FindIndex(v => v == ie.Var)) { Resolver.Warning(arg.tok, "{0}s given as :induction arguments must be given in the same order as in the {1}; ignoring attribute", - forLemma ? "lemma parameter" : "bound variable", forLemma ? "lemma" : "quantifier"); + lemma != null ? "lemma parameter" : "bound variable", lemma != null ? "lemma" : "quantifier"); return; } } Resolver.Warning(arg.tok, "invalid :induction attribute argument; expected {0}{1}; ignoring attribute", i == 0 ? "'false' or 'true' or " : "", - forLemma ? "lemma parameter" : "bound variable"); + lemma != null ? "lemma parameter" : "bound variable"); return; } // The argument list was legal, so let's use it for the _induction attribute @@ -1410,6 +1410,9 @@ namespace Microsoft.Dafny attributes = new Attributes("_induction", inductionVariables, attributes); // And since we're inferring something, let's also report that in a hover text. 
var s = Printer.OneAttributeToString(attributes, "induction"); + if (lemma is PrefixLemma) { + s = lemma.Name + " " + s; + } Resolver.ReportAdditionalInformation(tok, s); } } @@ -1423,7 +1426,7 @@ namespace Microsoft.Dafny protected override void VisitOneExpr(Expression expr) { var q = expr as QuantifierExpr; if (q != null) { - IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, new List() { q.LogicalBody() }, ref q.Attributes); + IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, new List() { q.LogicalBody() }, null, ref q.Attributes); } } void VisitInductionStmt(Statement stmt) { diff --git a/Test/VerifyThis2015/Problem1.dfy b/Test/VerifyThis2015/Problem1.dfy index 2e8a5243..67eeba07 100644 --- a/Test/VerifyThis2015/Problem1.dfy +++ b/Test/VerifyThis2015/Problem1.dfy @@ -161,18 +161,13 @@ lemma Same2(pat: seq, a: seq) ensures IRP_Alt(pat, a) { if pat == [] { - assert pat <= a; } else if a != [] && pat[0] == a[0] { - assert IsRelaxedPrefixAux(pat[1..], a[1..], 1); - Same2(pat[1..], a[1..]); if pat[1..] <= a[1..] { - assert pat <= a; } else { var k :| 0 <= k < |pat[1..]| && pat[1..][..k] + pat[1..][k+1..] <= a[1..]; assert 0 <= k+1 < |pat| && pat[..k+1] + pat[k+2..] <= a; } } else { - assert IsRelaxedPrefixAux(pat[1..], a, 0); Same2_Prefix(pat[1..], a); assert pat[1..] <= a; assert 0 <= 0 < |pat| && pat[..0] + pat[0+1..] <= a; @@ -182,7 +177,4 @@ lemma Same2_Prefix(pat: seq, a: seq) requires IsRelaxedPrefixAux(pat, a, 0) ensures pat <= a { - if pat != [] { - Same2_Prefix(pat[1..], a[1..]); - } } diff --git a/Test/dafny0/InductivePredicates.dfy b/Test/dafny0/InductivePredicates.dfy index 424118e7..8d05af11 100644 --- a/Test/dafny0/InductivePredicates.dfy +++ b/Test/dafny0/InductivePredicates.dfy @@ -18,7 +18,7 @@ lemma M(x: natinf) } // yay! my first proof involving an inductive predicate :) -lemma M'(k: nat, x: natinf) +lemma {:induction false} M'(k: nat, x: natinf) requires Even#[k](x) ensures x.N? && x.n % 2 == 0 { @@ -32,8 +32,14 @@ lemma M'(k: nat, x: natinf) } } +lemma M'_auto(k: nat, x: natinf) + requires Even#[k](x) + ensures x.N? && x.n % 2 == 0 +{ +} + // Here is the same proof as in M / M', but packaged into a single "inductive lemma": -inductive lemma IL(x: natinf) +inductive lemma {:induction false} IL(x: natinf) requires Even(x) ensures x.N? && x.n % 2 == 0 { @@ -45,7 +51,7 @@ inductive lemma IL(x: natinf) } } -inductive lemma IL_EvenBetter(x: natinf) +inductive lemma {:induction false} IL_EvenBetter(x: natinf) requires Even(x) ensures x.N? && x.n % 2 == 0 { @@ -57,6 +63,12 @@ inductive lemma IL_EvenBetter(x: natinf) } } +inductive lemma IL_Best(x: natinf) + requires Even(x) + ensures x.N? && x.n % 2 == 0 +{ +} + inductive lemma IL_Bad(x: natinf) requires Even(x) ensures x.N? && x.n % 2 == 0 @@ -107,7 +119,7 @@ module Alt { { match x case N(n) => N(n+1) - case Inf => Inf + case Inf => Inf } inductive predicate Even(x: natinf) @@ -116,7 +128,7 @@ module Alt { exists y :: x == S(S(y)) && Even(y) } - inductive lemma MyLemma_NotSoNice(x: natinf) + inductive lemma {:induction false} MyLemma_NotSoNice(x: natinf) requires Even(x) ensures x.N? && x.n % 2 == 0 { @@ -130,7 +142,7 @@ module Alt { } } - inductive lemma MyLemma_NiceButNotFast(x: natinf) + inductive lemma {:induction false} MyLemma_NiceButNotFast(x: natinf) requires Even(x) ensures x.N? && x.n % 2 == 0 { @@ -143,7 +155,13 @@ module Alt { assert x.n == y.n + 2; } } - + + inductive lemma MyLemma_RealNice_AndFastToo(x: natinf) + requires Even(x) + ensures x.N? 
&& x.n % 2 == 0 + { + } + lemma InfNotEven() ensures !Even(Inf) { @@ -156,15 +174,6 @@ module Alt { requires Even(Inf) ensures false { - var x := Inf; - if { - case x.N? && x.n == 0 => - assert false; // this case is absurd - case exists y :: x == S(S(y)) && Even(y) => - var y :| x == S(S(y)) && Even(y); - assert y == Inf; - InfNotEven_Aux(); - } } lemma NextEven(x: natinf) diff --git a/Test/dafny0/InductivePredicates.dfy.expect b/Test/dafny0/InductivePredicates.dfy.expect index ccf30643..48beade5 100644 --- a/Test/dafny0/InductivePredicates.dfy.expect +++ b/Test/dafny0/InductivePredicates.dfy.expect @@ -1,9 +1,9 @@ -InductivePredicates.dfy(64,9): Error: assertion violation +InductivePredicates.dfy(76,9): Error: assertion violation Execution trace: (0,0): anon0 (0,0): anon3_Then -InductivePredicates.dfy(76,10): Error: assertion violation +InductivePredicates.dfy(88,10): Error: assertion violation Execution trace: (0,0): anon0 -Dafny program verifier finished with 29 verified, 2 errors +Dafny program verifier finished with 35 verified, 2 errors diff --git a/Test/dafny0/Parallel.dfy b/Test/dafny0/Parallel.dfy index 030eb350..e0d6491b 100644 --- a/Test/dafny0/Parallel.dfy +++ b/Test/dafny0/Parallel.dfy @@ -2,13 +2,13 @@ // RUN: %diff "%s.expect" "%t" class C { - var data: int; - var n: nat; - var st: set; + var data: int + var n: nat + var st: set ghost method CLemma(k: int) - requires k != -23; - ensures data < k; // magic, isn't it (or bogus, some would say) + requires k != -23 + ensures data < k // magic, isn't it (or bogus, some would say) } // This method more or less just tests the syntax, resolution, and basic verification @@ -19,31 +19,31 @@ method ParallelStatement_Resolve( S: set, clx: C, cly: C, clk: int ) - requires a != null && null !in spine; - modifies a, spine; + requires a != null && null !in spine + modifies a, spine { - forall (i: int | 0 <= i < a.Length && i % 2 == 0) { + forall i | 0 <= i < a.Length && i % 2 == 0 { a[i] := a[(i + 1) % a.Length] + 3; } - forall (o | o in spine) { + forall o | o in spine { o.st := o.st + Repr; } - forall (x, y | x in S && 0 <= y+x < 100) { + forall x, y | x in S && 0 <= y+x < 100 { Lemma(clx, x, y); // error: precondition does not hold (clx may be null) } - forall (x, y | x in S && 0 <= y+x < 100) { + forall x, y | x in S && 0 <= y+x < 100 { cly.CLemma(x + y); // error: receiver might be null } - forall (p | 0 <= p) - ensures F(p) <= Sum(p) + p - 1; // error (no connection is known between F and Sum) + forall p | 0 <= p + ensures F(p) <= Sum(p) + p - 1 // error (no connection is known between F and Sum) { assert 0 <= G(p); ghost var t; - if (p % 2 == 0) { + if p % 2 == 0 { assert G(p) == F(p+2); // error (there's nothing that gives any relation between F and G) t := p+p; } else { @@ -56,11 +56,11 @@ method ParallelStatement_Resolve( } } -ghost method Lemma(c: C, x: int, y: int) - requires c != null; - ensures c.data <= x+y; -ghost method PowerLemma(x: int, y: int) - ensures Pred(x, y); +lemma Lemma(c: C, x: int, y: int) + requires c != null + ensures c.data <= x+y +lemma PowerLemma(x: int, y: int) + ensures Pred(x, y) function F(x: int): int function G(x: int): nat @@ -71,54 +71,54 @@ function Pred(x: int, y: int): bool // --------------------------------------------------------------------- method M0(S: set) - requires null !in S; - modifies S; - ensures forall o :: o in S ==> o.data == 85; - ensures forall o :: o != null && o !in S ==> o.data == old(o.data); + requires null !in S + modifies S + ensures forall o :: o in S ==> o.data == 85 
+ ensures forall o :: o != null && o !in S ==> o.data == old(o.data) { - forall (s | s in S) { + forall s | s in S { s.data := 85; } } method M1(S: set, x: C) - requires null !in S && x in S; + requires null !in S && x in S { - forall (s | s in S) - ensures s.data < 100; + forall s | s in S + ensures s.data < 100 { assume s.data == 85; } - if (*) { + if * { assert x.data == 85; // error (cannot be inferred from forall ensures clause) } else { assert x.data < 120; } - forall (s | s in S) - ensures s.data < 70; // error + forall s | s in S + ensures s.data < 70 // error { assume s.data == 85; } } method M2() returns (a: array) - ensures a != null; - ensures forall i,j :: 0 <= i < a.Length/2 <= j < a.Length ==> a[i] < a[j]; + ensures a != null + ensures forall i,j :: 0 <= i < a.Length/2 <= j < a.Length ==> a[i] < a[j] { a := new int[250]; - forall (i: nat | i < 125) { + forall i: nat | i < 125 { a[i] := 423; } - forall (i | 125 <= i < 250) { + forall i | 125 <= i < 250 { a[i] := 300 + i; } } method M4(S: set, k: int) - modifies S; + modifies S { - forall (s | s in S && s != null) { + forall s | s in S && s != null { s.n := k; // error: k might be negative } } @@ -127,25 +127,25 @@ method M5() { if { case true => - forall (x | 0 <= x < 100) { + forall x | 0 <= x < 100 { PowerLemma(x, x); } assert Pred(34, 34); case true => - forall (x,y | 0 <= x < 100 && y == x+1) { + forall x,y | 0 <= x < 100 && y == x+1 { PowerLemma(x, y); } assert Pred(34, 35); case true => - forall (x,y | 0 <= x < y < 100) { + forall x,y | 0 <= x < y < 100 { PowerLemma(x, y); } assert Pred(34, 35); case true => - forall (x | x in set k | 0 <= k < 100) { + forall x | x in set k | 0 <= k < 100 { PowerLemma(x, x); } assert Pred(34, 34); @@ -155,22 +155,22 @@ method M5() method Main() { var a := new int[180]; - forall (i | 0 <= i < 180) { + forall i | 0 <= i < 180 { a[i] := 2*i + 100; } var sq := [0, 0, 0, 2, 2, 2, 5, 5, 5]; - forall (i | 0 <= i < |sq|) { + forall i | 0 <= i < |sq| { a[20+i] := sq[i]; } - forall (t | t in sq) { + forall t | t in sq { a[t] := 1000; } - forall (t,u | t in sq && t < 4 && 10 <= u < 10+t) { + forall t,u | t in sq && t < 4 && 10 <= u < 10+t { a[u] := 6000 + t; } var k := 0; - while (k < 180) { - if (k != 0) { print ", "; } + while k < 180 { + if k != 0 { print ", "; } print a[k]; k := k + 1; } @@ -180,50 +180,50 @@ method Main() method DuplicateUpdate() { var a := new int[180]; var sq := [0, 0, 0, 2, 2, 2, 5, 5, 5]; - if (*) { - forall (t,u | t in sq && 10 <= u < 10+t) { + if * { + forall t,u | t in sq && 10 <= u < 10+t { a[u] := 6000 + t; // error: a[10] (and a[11]) are assigned more than once } } else { - forall (t,u | t in sq && t < 4 && 10 <= u < 10+t) { + forall t,u | t in sq && t < 4 && 10 <= u < 10+t { a[u] := 6000 + t; // with the 't < 4' conjunct in the line above, this is fine } } } -ghost method DontDoMuch(x: int) +lemma DontDoMuch(x: int) { } method OmittedRange() { - forall (x: int) { } // a type is still needed for the bound variable - forall (x) { + forall x: int { } // a type is still needed for the bound variable + forall x { DontDoMuch(x); } } // ----------------------- two-state postconditions --------------------------------- -class TwoState_C { ghost var data: int; } +class TwoState_C { ghost var data: int } // It is not possible to achieve this postcondition in a ghost method, because ghost // contexts are not allowed to allocate state. Callers of this ghost method will know // that the postcondition is tantamount to 'false'. 
ghost method TwoState0(y: int) - ensures exists o: TwoState_C :: o != null && fresh(o); + ensures exists o: TwoState_C :: o != null && fresh(o) method TwoState_Main0() { - forall (x) { TwoState0(x); } + forall x { TwoState0(x); } assert false; // no prob, because the postcondition of TwoState0 implies false } method X_Legit(c: TwoState_C) - requires c != null; - modifies c; + requires c != null + modifies c { c.data := c.data + 1; - forall (x | c.data <= x) - ensures old(c.data) < x; // note that the 'old' refers to the method's initial state + forall x | c.data <= x + ensures old(c.data) < x // note that the 'old' refers to the method's initial state { } } @@ -235,8 +235,8 @@ method X_Legit(c: TwoState_C) // method, not the beginning of the 'forall' statement. method TwoState_Main2() { - forall (x: int) - ensures exists o: TwoState_C :: o != null && fresh(o); + forall x: int + ensures exists o: TwoState_C :: o != null && fresh(o) { TwoState0(x); } @@ -251,8 +251,8 @@ method TwoState_Main2() // statement's effect on the heap is not optimized away. method TwoState_Main3() { - forall (x: int) - ensures exists o: TwoState_C :: o != null && fresh(o); + forall x: int + ensures exists o: TwoState_C :: o != null && fresh(o) { assume false; // (there's no other way to achieve this forall-statement postcondition) } @@ -262,11 +262,11 @@ method TwoState_Main3() // ------- empty forall statement ----------------------------------------- class EmptyForallStatement { - var emptyPar: int; + var emptyPar: int method Empty_Parallel0() - modifies this; - ensures emptyPar == 8; + modifies this + ensures emptyPar == 8 { forall () { this.emptyPar := 8; @@ -274,11 +274,11 @@ class EmptyForallStatement { } function EmptyPar_P(x: int): bool - ghost method EmptyPar_Lemma(x: int) - ensures EmptyPar_P(x); + lemma EmptyPar_Lemma(x: int) + ensures EmptyPar_P(x) method Empty_Parallel1() - ensures EmptyPar_P(8); + ensures EmptyPar_P(8) { forall { EmptyPar_Lemma(8); @@ -288,7 +288,7 @@ class EmptyForallStatement { method Empty_Parallel2() { forall - ensures exists k :: EmptyPar_P(k); + ensures exists k :: EmptyPar_P(k) { var y := 8; assume EmptyPar_P(y); @@ -312,9 +312,9 @@ predicate ThProperty(step: nat, t: Nat, r: nat) case Succ(o) => step>0 && exists ro:nat :: ThProperty(step-1, o, ro) } -ghost method Th(step: nat, t: Nat, r: nat) - requires t.Succ? && ThProperty(step, t, r); +lemma Th(step: nat, t: Nat, r: nat) + requires t.Succ? 
&& ThProperty(step, t, r) // the next line follows from the precondition and the definition of ThProperty - ensures exists ro:nat :: ThProperty(step-1, t.tail, ro); + ensures exists ro:nat :: ThProperty(step-1, t.tail, ro) { } diff --git a/Test/dafny3/Filter.dfy b/Test/dafny3/Filter.dfy index 24c8e94e..6e67de26 100644 --- a/Test/dafny3/Filter.dfy +++ b/Test/dafny3/Filter.dfy @@ -35,7 +35,7 @@ lemma Lemma_TailSubStreamK(s: Stream, u: Stream, k: nat) // this lemma could ha { if k != 0 { Lemma_InTail(s.head, u); - Lemma_TailSubStreamK(s.tail, u, k-1); + //Lemma_TailSubStreamK(s.tail, u, k-1); } } lemma Lemma_InSubStream(x: T, s: Stream, u: Stream) @@ -193,10 +193,10 @@ lemma FS_Ping(s: Stream, h: PredicateHandle, x: T) } lemma FS_Pong(s: Stream, h: PredicateHandle, x: T, k: nat) - requires AlwaysAnother(s, h) && In(x, s) && P(x, h); - requires Tail(s, k).head == x; - ensures In(x, Filter(s, h)); - decreases k; + requires AlwaysAnother(s, h) && In(x, s) && P(x, h) + requires Tail(s, k).head == x + ensures In(x, Filter(s, h)) + decreases k { var fs := Filter(s, h); if s.head == x { @@ -205,14 +205,14 @@ lemma FS_Pong(s: Stream, h: PredicateHandle, x: T, k: nat) assert fs == Cons(s.head, Filter(s.tail, h)); // reminder of where we are calc { true; - == { FS_Pong(s.tail, h, x, k-1); } + //== { FS_Pong(s.tail, h, x, k-1); } In(x, Filter(s.tail, h)); ==> { assert fs.head != x; Lemma_InTail(x, fs); } In(x, fs); } } else { - assert fs == Filter(s.tail, h); // reminder of where we are - FS_Pong(s.tail, h, x, k-1); + //assert fs == Filter(s.tail, h); // reminder of where we are + //FS_Pong(s.tail, h, x, k-1); } } diff --git a/Test/dafny3/GenericSort.dfy b/Test/dafny3/GenericSort.dfy index 7ea038be..6bd06965 100644 --- a/Test/dafny3/GenericSort.dfy +++ b/Test/dafny3/GenericSort.dfy @@ -7,25 +7,25 @@ abstract module TotalOrder { // Three properties of total orders. Here, they are given as unproved // lemmas, that is, as axioms. lemma Antisymmetry(a: T, b: T) - requires Leq(a, b) && Leq(b, a); - ensures a == b; + requires Leq(a, b) && Leq(b, a) + ensures a == b lemma Transitivity(a: T, b: T, c: T) - requires Leq(a, b) && Leq(b, c); - ensures Leq(a, c); + requires Leq(a, b) && Leq(b, c) + ensures Leq(a, c) lemma Totality(a: T, b: T) - ensures Leq(a, b) || Leq(b, a); + ensures Leq(a, b) || Leq(b, a) } abstract module Sort { import O as TotalOrder // let O denote some module that has the members of TotalOrder predicate Sorted(a: array, low: int, high: int) - requires a != null && 0 <= low <= high <= a.Length; - reads a; + requires a != null && 0 <= low <= high <= a.Length + reads a // The body of the predicate is hidden outside the module, but the postcondition is // "exported" (that is, visible, known) outside the module. Here, we choose the // export the following property: - ensures Sorted(a, low, high) ==> forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j]); + ensures Sorted(a, low, high) ==> forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j]) { forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j]) } @@ -33,18 +33,18 @@ abstract module Sort { // In the insertion sort routine below, it's more convenient to keep track of only that // neighboring elements of the array are sorted... predicate NeighborSorted(a: array, low: int, high: int) - requires a != null && 0 <= low <= high <= a.Length; - reads a; + requires a != null && 0 <= low <= high <= a.Length + reads a { forall i :: low < i < high ==> O.Leq(a[i-1], a[i]) } // ...but we show that property to imply all pairs to be sorted. 
The proof of this // lemma uses the transitivity property. lemma NeighborSorted_implies_Sorted(a: array, low: int, high: int) - requires a != null && 0 <= low <= high <= a.Length; - requires NeighborSorted(a, low, high); - ensures Sorted(a, low, high); - decreases high - low; + requires a != null && 0 <= low <= high <= a.Length + requires NeighborSorted(a, low, high) + ensures Sorted(a, low, high) + decreases high - low { if low != high { NeighborSorted_implies_Sorted(a, low+1, high); @@ -57,25 +57,25 @@ abstract module Sort { // Standard insertion sort method method InsertionSort(a: array) - requires a != null; - modifies a; - ensures Sorted(a, 0, a.Length); - ensures multiset(a[..]) == old(multiset(a[..])); + requires a != null + modifies a + ensures Sorted(a, 0, a.Length) + ensures multiset(a[..]) == old(multiset(a[..])) { if a.Length == 0 { return; } var i := 1; while i < a.Length - invariant 0 < i <= a.Length; - invariant NeighborSorted(a, 0, i); - invariant multiset(a[..]) == old(multiset(a[..])); + invariant 0 < i <= a.Length + invariant NeighborSorted(a, 0, i) + invariant multiset(a[..]) == old(multiset(a[..])) { var j := i; while 0 < j && !O.Leq(a[j-1], a[j]) - invariant 0 <= j <= i; - invariant NeighborSorted(a, 0, j); - invariant NeighborSorted(a, j, i+1); - invariant 0 < j < i ==> O.Leq(a[j-1], a[j+1]); - invariant multiset(a[..]) == old(multiset(a[..])); + invariant 0 <= j <= i + invariant NeighborSorted(a, 0, j) + invariant NeighborSorted(a, j, i+1) + invariant 0 < j < i ==> O.Leq(a[j-1], a[j+1]) + invariant multiset(a[..]) == old(multiset(a[..])) { // The proof of correctness uses the totality property here. // It implies that if two elements are not previously in @@ -97,7 +97,7 @@ module IntOrder refines TotalOrder { datatype T = Int(i: int) // Define the ordering on these integers predicate method Leq ... - ensures Leq(a, b) ==> a.i <= b.i; + ensures Leq(a, b) ==> a.i <= b.i { a.i <= b.i } @@ -156,7 +156,7 @@ module intOrder refines TotalOrder { type T = int // Define the ordering on these integers predicate method Leq ... - ensures Leq(a, b) ==> a <= b; + ensures Leq(a, b) ==> a <= b { a <= b } diff --git a/Test/dafny3/InfiniteTrees.dfy b/Test/dafny3/InfiniteTrees.dfy index 516a9e4e..85f73bc3 100644 --- a/Test/dafny3/InfiniteTrees.dfy +++ b/Test/dafny3/InfiniteTrees.dfy @@ -286,7 +286,7 @@ lemma Theorem1_Lemma(t: Tree, n: nat, p: Stream) LowerThan(ch, n); ==> // def. LowerThan LowerThan(ch.head.children, n-1); - ==> { Theorem1_Lemma(ch.head, n-1, tail); } + ==> //{ Theorem1_Lemma(ch.head, n-1, tail); } !IsNeverEndingStream(tail); ==> // def. IsNeverEndingStream !IsNeverEndingStream(p); @@ -374,30 +374,30 @@ lemma Proposition3b() } } lemma Proposition3b_Lemma(t: Tree, h: nat, p: Stream) - requires LowerThan(t.children, h) && ValidPath(t, p); - ensures !IsNeverEndingStream(p); - decreases h; + requires LowerThan(t.children, h) && ValidPath(t, p) + ensures !IsNeverEndingStream(p) + decreases h { match p { case Nil => case Cons(index, tail) => // From the definition of ValidPath(t, p), we get the following: var ch := Tail(t.children, index); - assert ch.Cons? && ValidPath(ch.head, tail); + // assert ch.Cons? 
&& ValidPath(ch.head, tail); // From the definition of LowerThan(t.children, h), we get the following: match t.children { case Nil => ValidPath_Lemma(p); assert false; // absurd case case Cons(_, _) => - assert 1 <= h; + // assert 1 <= h; LowerThan_Lemma(t.children, index, h); - assert LowerThan(ch, h); + // assert LowerThan(ch, h); } // Putting these together, by ch.Cons? and the definition of LowerThan(ch, h), we get: - assert LowerThan(ch.head.children, h-1); + // assert LowerThan(ch.head.children, h-1); // And now we can invoke the induction hypothesis: - Proposition3b_Lemma(ch.head, h-1, tail); + // Proposition3b_Lemma(ch.head, h-1, tail); } } @@ -627,30 +627,10 @@ colemma Path_Lemma2'(p: Stream) } } colemma Path_Lemma2''(p: Stream, n: nat, tail: Stream) - requires IsNeverEndingStream(p) && p.tail == tail; - ensures InfinitePath'(S2N'(n, tail)); + requires IsNeverEndingStream(p) && p.tail == tail + ensures InfinitePath'(S2N'(n, tail)) { - if n <= 0 { - calc { - InfinitePath'#[_k](S2N'(n, tail)); - // def. S2N' - InfinitePath'#[_k](Zero(S2N(tail))); - // def. InfinitePath' - InfinitePath#[_k-1](S2N(tail)); - { Path_Lemma2'(tail); } - true; - } - } else { - calc { - InfinitePath'#[_k](S2N'(n, tail)); - // def. S2N' - InfinitePath'#[_k](Succ(S2N'(n-1, tail))); - // def. InfinitePath' - InfinitePath'#[_k-1](S2N'(n-1, tail)); - { Path_Lemma2''(p, n-1, tail); } - true; - } - } + Path_Lemma2'(tail); } lemma Path_Lemma3(r: CoOption) ensures InfinitePath(r) ==> IsNeverEndingStream(N2S(r)); diff --git a/Test/dafny4/ACL2-extractor.dfy b/Test/dafny4/ACL2-extractor.dfy index 8fe98531..bd2c9d83 100644 --- a/Test/dafny4/ACL2-extractor.dfy +++ b/Test/dafny4/ACL2-extractor.dfy @@ -116,9 +116,9 @@ lemma RevLength(xs: List) // you can prove two lists equal by proving their elements equal lemma EqualElementsMakeEqualLists(xs: List, ys: List) - requires length(xs) == length(ys); - requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys); - ensures xs == ys; + requires length(xs) == length(ys) + requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys) + ensures xs == ys { match xs { case Nil => @@ -132,7 +132,7 @@ lemma EqualElementsMakeEqualLists(xs: List, ys: List) nth(i+1, xs) == nth(i+1, ys); } } - EqualElementsMakeEqualLists(xs.tail, ys.tail); + // EqualElementsMakeEqualLists(xs.tail, ys.tail); } } diff --git a/Test/dafny4/KozenSilva.dfy b/Test/dafny4/KozenSilva.dfy index af0cdc71..ef49b10f 100644 --- a/Test/dafny4/KozenSilva.dfy +++ b/Test/dafny4/KozenSilva.dfy @@ -25,8 +25,8 @@ copredicate LexLess(s: Stream, t: Stream) // A co-lemma is used to establish the truth of a co-predicate. colemma Theorem1_LexLess_Is_Transitive(s: Stream, t: Stream, u: Stream) - requires LexLess(s, t) && LexLess(t, u); - ensures LexLess(s, u); + requires LexLess(s, t) && LexLess(t, u) + ensures LexLess(s, u) { // Here is the proof, which is actually a body of code. It lends itself to a // simple, intuitive co-inductive reading. For a theorem this simple, this simple @@ -38,6 +38,14 @@ colemma Theorem1_LexLess_Is_Transitive(s: Stream, t: Stream, u: Stream } } +// Actually, Dafny can do the proof of the previous lemma completely automatically. Here it is: +colemma Theorem1_LexLess_Is_Transitive_Automatic(s: Stream, t: Stream, u: Stream) + requires LexLess(s, t) && LexLess(t, u) + ensures LexLess(s, u) +{ + // no manual proof needed, so the body of the co-lemma is empty +} + // The following predicate captures the (inductively defined) negation of (the // co-inductively defined) LexLess above. 
predicate NotLexLess(s: Stream, t: Stream) @@ -51,7 +59,7 @@ predicate NotLexLess'(k: nat, s: Stream, t: Stream) } lemma EquivalenceTheorem(s: Stream, t: Stream) - ensures LexLess(s, t) <==> !NotLexLess(s, t); + ensures LexLess(s, t) <==> !NotLexLess(s, t) { if !NotLexLess(s, t) { EquivalenceTheorem0(s, t); @@ -61,8 +69,8 @@ lemma EquivalenceTheorem(s: Stream, t: Stream) } } colemma EquivalenceTheorem0(s: Stream, t: Stream) - requires !NotLexLess(s, t); - ensures LexLess(s, t); + requires !NotLexLess(s, t) + ensures LexLess(s, t) { // Here, more needs to be said about the way Dafny handles co-lemmas. // The way a co-lemma establishes a co-predicate is to prove, by induction, @@ -74,14 +82,14 @@ colemma EquivalenceTheorem0(s: Stream, t: Stream) // indicates a finite unrolling of a co-inductive predicate. In particular, // LexLess#[k] refers to k unrollings of LexLess. lemma EquivalenceTheorem0_Lemma(k: nat, s: Stream, t: Stream) - requires !NotLexLess'(k, s, t); - ensures LexLess#[k](s, t); + requires !NotLexLess'(k, s, t) + ensures LexLess#[k](s, t) { // This simple inductive proof is done completely automatically by Dafny. } lemma EquivalenceTheorem1(s: Stream, t: Stream) - requires LexLess(s, t); - ensures !NotLexLess(s, t); + requires LexLess(s, t) + ensures !NotLexLess(s, t) { // The forall statement in Dafny is used, here, as universal introduction: // what EquivalenceTheorem1_Lemma establishes for one k, the forall @@ -91,22 +99,22 @@ lemma EquivalenceTheorem1(s: Stream, t: Stream) } } lemma EquivalenceTheorem1_Lemma(k: nat, s: Stream, t: Stream) - requires LexLess(s, t); - ensures !NotLexLess'(k, s, t); + requires LexLess(s, t) + ensures !NotLexLess'(k, s, t) { } lemma Theorem1_Alt(s: Stream, t: Stream, u: Stream) - requires NotLexLess(s, u); - ensures NotLexLess(s, t) || NotLexLess(t, u); + requires NotLexLess(s, u) + ensures NotLexLess(s, t) || NotLexLess(t, u) { forall k: nat | NotLexLess'(k, s, u) { Theorem1_Alt_Lemma(k, s, t, u); } } lemma Theorem1_Alt_Lemma(k: nat, s: Stream, t: Stream, u: Stream) - requires NotLexLess'(k, s, u); - ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u); + requires NotLexLess'(k, s, u) + ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u) { } @@ -116,8 +124,8 @@ function PointwiseAdd(s: Stream, t: Stream): Stream } colemma Theorem2_Pointwise_Addition_Is_Monotone(s: Stream, t: Stream, u: Stream, v: Stream) - requires LexLess(s, t) && LexLess(u, v); - ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v)); + requires LexLess(s, t) && LexLess(u, v) + ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v)) { // The co-lemma will establish the co-inductive predicate by establishing // all finite unrollings thereof. 
Each finite unrolling is proved by @@ -148,15 +156,9 @@ copredicate Subtype(a: RecType, b: RecType) } colemma Theorem3_Subtype_Is_Transitive(a: RecType, b: RecType, c: RecType) - requires Subtype(a, b) && Subtype(b, c); - ensures Subtype(a, c); -{ - if a == Bottom || c == Top { - // done - } else { - Theorem3_Subtype_Is_Transitive(c.dom, b.dom, a.dom); - Theorem3_Subtype_Is_Transitive(a.ran, b.ran, c.ran); - } + requires Subtype(a, b) && Subtype(b, c) + ensures Subtype(a, c) +{ } // -------------------------------------------------------------------------- @@ -184,16 +186,16 @@ copredicate ValBelow(u: Val, v: Val) } colemma Theorem4a_ClEnvBelow_Is_Transitive(c: ClEnv, d: ClEnv, e: ClEnv) - requires ClEnvBelow(c, d) && ClEnvBelow(d, e); - ensures ClEnvBelow(c, e); + requires ClEnvBelow(c, d) && ClEnvBelow(d, e) + ensures ClEnvBelow(c, e) { forall y | y in c.m { Theorem4b_ValBelow_Is_Transitive#[_k-1](c.m[y], d.m[y], e.m[y]); } } colemma Theorem4b_ValBelow_Is_Transitive(u: Val, v: Val, w: Val) - requires ValBelow(u, v) && ValBelow(v, w); - ensures ValBelow(u, w); + requires ValBelow(u, v) && ValBelow(v, w) + ensures ValBelow(u, w) { if u.ValCl? { Theorem4a_ClEnvBelow_Is_Transitive(u.cl.env, v.cl.env, w.cl.env); @@ -209,7 +211,7 @@ predicate IsCapsule(cap: Capsule) } function ClosureConversion(cap: Capsule): Cl - requires IsCapsule(cap); + requires IsCapsule(cap) { Closure(cap.e.abs, ClosureConvertedMap(cap.s)) // In the Kozen and Silva paper, there are more conditions, having to do with free variables, @@ -229,8 +231,8 @@ predicate CapsuleEnvironmentBelow(s: map, t: map, t: map) - requires CapsuleEnvironmentBelow(s, t); - ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t)); + requires CapsuleEnvironmentBelow(s, t) + ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t)) { } @@ -251,8 +253,8 @@ copredicate Bisim(s: Stream, t: Stream) } colemma Theorem6_Bisim_Is_Symmetric(s: Stream, t: Stream) - requires Bisim(s, t); - ensures Bisim(t, s); + requires Bisim(s, t) + ensures Bisim(t, s) { // proof is automatic } @@ -270,8 +272,8 @@ function merge(s: Stream, t: Stream): Stream // In general, the termination argument needs to be supplied explicitly in terms // of a metric, rank, variant function, or whatever you want to call it--a // "decreases" clause in Dafny. Dafny provides some help in making up "decreases" -// clauses, and in this case it automatically adds "decreases 0;" to SplitLeft -// and "decreases 1;" to SplitRight. With these "decreases" clauses, the +// clauses, and in this case it automatically adds "decreases 0" to SplitLeft +// and "decreases 1" to SplitRight. With these "decreases" clauses, the // termination check of SplitRight's call to SplitLeft will simply be "0 < 1", // which is trivial to check. function SplitLeft(s: Stream): Stream @@ -284,7 +286,7 @@ function SplitRight(s: Stream): Stream } colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream) - ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s); + ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s) { var LHS := merge(SplitLeft(s), SplitRight(s)); // The construct that follows is a "calc" statement. 
It gives a way to write an @@ -318,7 +320,7 @@ colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream) } colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Equal(s: Stream) - ensures merge(SplitLeft(s), SplitRight(s)) == s; + ensures merge(SplitLeft(s), SplitRight(s)) == s { // The proof of this co-lemma is actually done completely automatically (so the // body of this co-lemma can be empty). However, just to show what the calculations diff --git a/Test/dafny4/KozenSilva.dfy.expect b/Test/dafny4/KozenSilva.dfy.expect index c6c90498..90432af3 100644 --- a/Test/dafny4/KozenSilva.dfy.expect +++ b/Test/dafny4/KozenSilva.dfy.expect @@ -1,2 +1,2 @@ -Dafny program verifier finished with 47 verified, 0 errors +Dafny program verifier finished with 49 verified, 0 errors diff --git a/Test/dafny4/NipkowKlein-chapter7.dfy b/Test/dafny4/NipkowKlein-chapter7.dfy index 7db31cbd..e694fc4b 100644 --- a/Test/dafny4/NipkowKlein-chapter7.dfy +++ b/Test/dafny4/NipkowKlein-chapter7.dfy @@ -112,13 +112,6 @@ inductive lemma lemma_7_6(b: bexp, c: com, c': com, s: state, t: state) requires big_step(While(b, c), s, t) && equiv_c(c, c') ensures big_step(While(b, c'), s, t) { - if !bval(b, s) { - // trivial - } else { - var s' :| big_step#[_k-1](c, s, s') && big_step#[_k-1](While(b, c), s', t); - lemma_7_6(b, c, c', s', t); // induction hypothesis - assert big_step(While(b, c'), s', t); - } } // equiv_c is an equivalence relation @@ -139,27 +132,7 @@ inductive lemma IMP_is_deterministic(c: com, s: state, t: state, t': state) requires big_step(c, s, t) && big_step(c, s, t') ensures t == t' { - match c - case SKIP => - // trivial - case Assign(x, a) => - // trivial - case Seq(c0, c1) => - var s' :| big_step#[_k-1](c0, s, s') && big_step#[_k-1](c1, s', t); - var s'' :| big_step#[_k-1](c0, s, s'') && big_step#[_k-1](c1, s'', t'); - IMP_is_deterministic(c0, s, s', s''); - IMP_is_deterministic(c1, s', t, t'); - case If(b, thn, els) => - IMP_is_deterministic(if bval(b, s) then thn else els, s, t, t'); - case While(b, body) => - if !bval(b, s) { - // trivial - } else { - var s' :| big_step#[_k-1](body, s, s') && big_step#[_k-1](While(b, body), s', t); - var s'' :| big_step#[_k-1](body, s, s'') && big_step#[_k-1](While(b, body), s'', t'); - IMP_is_deterministic(body, s, s', s''); - IMP_is_deterministic(While(b, body), s', t, t'); - } + // Dafny totally rocks! 
} // ----- Small-step semantics ----- @@ -214,12 +187,6 @@ inductive lemma star_transitive_aux(c0: com, s0: state, c1: com, s1: state, c2: requires small_step_star(c0, s0, c1, s1) ensures small_step_star(c1, s1, c2, s2) ==> small_step_star(c0, s0, c2, s2) { - if c0 == c1 && s0 == s1 { - } else { - var c', s' :| - small_step(c0, s0, c', s') && small_step_star#[_k-1](c', s', c1, s1); - star_transitive_aux(c', s', c1, s1, c2, s2); - } } // The big-step semantics can be simulated by some number of small steps @@ -240,15 +207,14 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state) small_step_star(Seq(c0, c1), s, Seq(SKIP, c1), s') && small_step_star(Seq(SKIP, c1), s', SKIP, t); { lemma_7_13(c0, s, SKIP, s', c1); } small_step_star(c0, s, SKIP, s') && small_step_star(Seq(SKIP, c1), s', SKIP, t); - { BigStep_implies_SmallStepStar(c0, s, s'); } + //{ BigStep_implies_SmallStepStar(c0, s, s'); } small_step_star(Seq(SKIP, c1), s', SKIP, t); { assert small_step(Seq(SKIP, c1), s', c1, s'); } small_step_star(c1, s', SKIP, t); - { BigStep_implies_SmallStepStar(c1, s', t); } + //{ BigStep_implies_SmallStepStar(c1, s', t); } true; } case If(b, thn, els) => - BigStep_implies_SmallStepStar(if bval(b, s) then thn else els, s, t); case While(b, body) => if !bval(b, s) && s == t { calc <== { @@ -271,11 +237,11 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state) small_step_star(Seq(body, While(b, body)), s, Seq(SKIP, While(b, body)), s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t); { lemma_7_13(body, s, SKIP, s', While(b, body)); } small_step_star(body, s, SKIP, s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t); - { BigStep_implies_SmallStepStar(body, s, s'); } + //{ BigStep_implies_SmallStepStar(body, s, s'); } small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t); { assert small_step(Seq(SKIP, While(b, body)), s', While(b, body), s'); } small_step_star(While(b, body), s', SKIP, t); - { BigStep_implies_SmallStepStar(While(b, body), s', t); } + //{ BigStep_implies_SmallStepStar(While(b, body), s', t); } true; } } @@ -299,7 +265,6 @@ inductive lemma SmallStepStar_implies_BigStep(c: com, s: state, t: state) if c == SKIP && s == t { } else { var c', s' :| small_step(c, s, c', s') && small_step_star#[_k-1](c', s', SKIP, t); - SmallStepStar_implies_BigStep(c', s', t); SmallStep_plus_BigStep(c, s, c', s', t); } } @@ -316,7 +281,6 @@ inductive lemma SmallStep_plus_BigStep(c: com, s: state, c': com, s': state, t: var c0' :| c' == Seq(c0', c1) && small_step(c0, s, c0', s'); if big_step(c', s', t) { var s'' :| big_step(c0', s', s'') && big_step(c1, s'', t); - SmallStep_plus_BigStep(c0, s, c0', s', s''); } } case If(b, thn, els) => diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy index 3dba6325..0d6cffa1 100644 --- a/Test/dafny4/NumberRepresentations.dfy +++ b/Test/dafny4/NumberRepresentations.dfy @@ -234,17 +234,16 @@ lemma UniqueRepresentation(a: seq, b: seq, lowDigit: int, base: nat) } lemma ZeroIsUnique(a: seq, lowDigit: int, base: nat) - requires 2 <= base && lowDigit <= 0 < lowDigit + base; - requires a == trim(a); - requires IsSkewNumber(a, lowDigit, base); - requires eval(a, base) == 0; - ensures a == []; + requires 2 <= base && lowDigit <= 0 < lowDigit + base + requires a == trim(a) + requires IsSkewNumber(a, lowDigit, base) + requires eval(a, base) == 0 + ensures a == [] { if a != [] { - assert eval(a, base) == a[0] + base * eval(a[1..], base); if eval(a[1..], base) == 0 { TrimProperty(a); - 
ZeroIsUnique(a[1..], lowDigit, base); + // ZeroIsUnique(a[1..], lowDigit, base); } assert false; } diff --git a/Test/vstte2012/Tree.dfy b/Test/vstte2012/Tree.dfy index a346aac5..662024e4 100644 --- a/Test/vstte2012/Tree.dfy +++ b/Test/vstte2012/Tree.dfy @@ -22,9 +22,9 @@ datatype Result = Fail | Res(t: Tree, sOut: seq) // The postconditions state properties that are needed // in the completeness proof. function toList(d: int, t: Tree): seq - ensures toList(d, t) != [] && toList(d, t)[0] >= d; - ensures (toList(d, t)[0] == d) == (t == Leaf); - decreases t; + ensures toList(d, t) != [] && toList(d, t)[0] >= d + ensures (toList(d, t)[0] == d) == (t == Leaf) + decreases t { match t case Leaf => [d] @@ -43,10 +43,10 @@ function toList(d: int, t: Tree): seq function method build_rec(d: int, s: seq): Result ensures build_rec(d, s).Res? ==> |build_rec(d, s).sOut| < |s| && - build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..]; + build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..] ensures build_rec(d, s).Res? ==> - toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|]; - decreases |s|, (if s==[] then 0 else s[0]-d); + toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|] + decreases |s|, (if s==[] then 0 else s[0]-d) { if s==[] || s[0] < d then Fail @@ -68,7 +68,7 @@ function method build_rec(d: int, s: seq): Result // sequence yields exactly the input sequence. // Completeness is proved as a lemma, see below. function method build(s: seq): Result - ensures build(s).Res? ==> toList(0,build(s).t) == s; + ensures build(s).Res? ==> toList(0,build(s).t) == s { var r := build_rec(0, s); if r.Res? && r.sOut == [] then r else Fail @@ -86,16 +86,14 @@ function method build(s: seq): Result // correctly (encoded by calls to this lemma). lemma lemma0(t: Tree, d: int, s: seq) ensures build_rec(d, toList(d, t) + s).Res? && - build_rec(d, toList(d, t) + s).sOut == s; + build_rec(d, toList(d, t) + s).sOut == s { match(t) { case Leaf => assert toList(d, t) == [d]; case Node(l, r) => assert toList(d, t) + s == toList(d+1, l) + (toList(d+1, r) + s); - - lemma0(l, d+1, toList(d+1, r) + s); // apply the induction hypothesis - lemma0(r, d+1, s); // apply the induction hypothesis + // the rest follows from (two invocations of) the (automatically applied) induction hypothesis } } @@ -107,8 +105,8 @@ lemma lemma0(t: Tree, d: int, s: seq) // the following two lemmas replace these arguments // by quantified variables. lemma lemma1(t: Tree, s:seq) - requires s == toList(0, t) + []; - ensures build(s).Res?; + requires s == toList(0, t) + [] + ensures build(s).Res? { lemma0(t, 0, []); } @@ -117,7 +115,7 @@ lemma lemma1(t: Tree, s:seq) // This lemma introduces the existential quantifier in the completeness // property. lemma lemma2(s: seq) - ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res?; + ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res? { forall t | toList(0,t) == s { lemma1(t, s); @@ -131,7 +129,7 @@ lemma lemma2(s: seq) // The body of the method converts the argument of lemma2 // into a universally quantified variable. lemma completeness() - ensures forall s: seq :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?); + ensures forall s: seq :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?) { forall s { lemma2(s); @@ -145,7 +143,7 @@ lemma completeness() // unfold the necessary definitions. method harness0() ensures build([1,3,3,2]).Res? 
&& - build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf)); + build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf)) { } @@ -155,6 +153,6 @@ method harness0() // assertions are required by the verifier to // unfold the necessary definitions. method harness1() - ensures build([1,3,2,2]).Fail?; + ensures build([1,3,2,2]).Fail? { } -- cgit v1.2.3 From 91cee1c2028f9ad995df863f2a4568d95f4ea1a8 Mon Sep 17 00:00:00 2001 From: qunyanm Date: Mon, 28 Mar 2016 12:02:37 -0700 Subject: Make /autoTriggers:1 to be default. Add /autoTriggers:0 to tests that requires it. Don't use pretty warning signs since we can't diff them correctly in the test output from the test run. --- Source/Dafny/DafnyOptions.cs | 6 ++-- Source/DafnyServer/Utilities.cs | 2 +- Test/VSI-Benchmarks/b8.dfy | 2 +- Test/VerifyThis2015/Problem3.dfy | 2 +- Test/cloudmake/CloudMake-CachedBuilds.dfy | 2 +- Test/cloudmake/CloudMake-ConsistentBuilds.dfy | 2 +- Test/cloudmake/CloudMake-ParallelBuilds.dfy | 2 +- Test/dafny0/Basics.dfy | 2 +- Test/dafny0/Calculations.dfy | 2 +- Test/dafny0/Compilation.dfy | 2 +- Test/dafny0/ForallCompilation.dfy | 2 +- Test/dafny0/Fuel.dfy | 2 +- Test/dafny0/LetExpr.dfy | 2 +- Test/dafny0/LetExpr.dfy.expect | 1 + Test/dafny0/LhsDuplicates.dfy | 2 +- Test/dafny0/Parallel.dfy | 2 +- Test/dafny0/SmallTests.dfy.expect | 1 + Test/dafny1/MoreInduction.dfy | 2 +- Test/dafny1/SchorrWaite-stages.dfy | 2 +- Test/dafny1/SchorrWaite.dfy | 2 +- Test/dafny1/Substitution.dfy | 2 +- Test/dafny1/UltraFilter.dfy | 2 +- Test/dafny2/SnapshotableTrees.dfy | 2 +- Test/dafny3/Filter.dfy | 2 +- Test/dafny4/GHC-MergeSort.dfy | 2 +- Test/dafny4/NumberRepresentations.dfy | 2 +- Test/dafny4/Primes.dfy | 2 +- Test/server/simple-session.transcript.expect | 41 +++++++++++++++++++++++++++ Test/vstte2012/BreadthFirstSearch.dfy | 2 +- 29 files changed, 71 insertions(+), 28 deletions(-) (limited to 'Test/dafny4/NumberRepresentations.dfy') diff --git a/Source/Dafny/DafnyOptions.cs b/Source/Dafny/DafnyOptions.cs index f3b38a84..607090eb 100644 --- a/Source/Dafny/DafnyOptions.cs +++ b/Source/Dafny/DafnyOptions.cs @@ -66,7 +66,7 @@ namespace Microsoft.Dafny public bool AllowGlobals = false; public bool CountVerificationErrors = true; public bool Optimize = false; - public bool AutoTriggers = false; + public bool AutoTriggers = true; public bool RewriteFocalPredicates = true; public bool PrintTooltips = false; public bool PrintStats = false; @@ -386,8 +386,8 @@ namespace Microsoft.Dafny 1 (default) - If preprocessing succeeds, set exit code to the number of verification errors. /autoTriggers: - 0 (default) - Do not generate {:trigger} annotations for user-level quantifiers. - 1 - Add a {:trigger} to each user-level quantifier. Existing + 0 - Do not generate {:trigger} annotations for user-level quantifiers. + 1 (default) - Add a {:trigger} to each user-level quantifier. Existing annotations are preserved. /rewriteFocalPredicates: 0 - Don't rewrite predicates in the body of prefix lemmas. 
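As context for the /autoTriggers switch being turned on by default in this commit: with /autoTriggers:0, quantifiers that carry no explicit {:trigger} attribute are left to the SMT solver's own matching heuristics; with /autoTriggers:1, Dafny selects trigger terms itself (warning when it cannot, as in the .expect updates further below) and preserves any annotations the user already wrote. The following minimal Dafny sketch is not part of this patch and uses made-up names; it only illustrates the two styles the option distinguishes:

    // Hypothetical example, not from the Dafny test suite.
    function f(x: int): int

    // No explicit trigger: under /autoTriggers:1, Dafny infers one (e.g. {f(s[i])}).
    lemma AutoTriggered(s: seq<int>)
      requires forall i :: 0 <= i < |s| ==> f(s[i]) == 0
    { }

    // Explicit trigger: kept as-is under either setting.
    lemma ExplicitlyTriggered(s: seq<int>)
      requires forall i {:trigger f(s[i])} :: 0 <= i < |s| ==> f(s[i]) == 0
    { }

Tests whose outcome depends on specific trigger choices keep the old behavior by adding /autoTriggers:0 to their RUN lines, as the remaining hunks in this commit do.
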
diff --git a/Source/DafnyServer/Utilities.cs b/Source/DafnyServer/Utilities.cs index 30d779e7..48bea01a 100644 --- a/Source/DafnyServer/Utilities.cs +++ b/Source/DafnyServer/Utilities.cs @@ -51,7 +51,7 @@ namespace Microsoft.Dafny { DafnyOptions.O.VerifySnapshots = 2; // Use caching DafnyOptions.O.VcsCores = Math.Max(1, System.Environment.ProcessorCount / 2); // Don't use too many cores DafnyOptions.O.PrintTooltips = true; // Dump tooptips (ErrorLevel.Info) to stdout - DafnyOptions.O.UnicodeOutput = true; // Use pretty warning signs + //DafnyOptions.O.UnicodeOutput = true; // Use pretty warning signs DafnyOptions.O.TraceProofObligations = true; // Show which method is being verified, but don't show duration of verification } else { throw new ServerException("Invalid command line options"); diff --git a/Test/VSI-Benchmarks/b8.dfy b/Test/VSI-Benchmarks/b8.dfy index ea1911fe..a44ff5c3 100644 --- a/Test/VSI-Benchmarks/b8.dfy +++ b/Test/VSI-Benchmarks/b8.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 "%s" > "%t" +// RUN: %dafny /compile:0 /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Benchmark 8 diff --git a/Test/VerifyThis2015/Problem3.dfy b/Test/VerifyThis2015/Problem3.dfy index 21bdd4ed..60506a33 100644 --- a/Test/VerifyThis2015/Problem3.dfy +++ b/Test/VerifyThis2015/Problem3.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Rustan Leino diff --git a/Test/cloudmake/CloudMake-CachedBuilds.dfy b/Test/cloudmake/CloudMake-CachedBuilds.dfy index 9e1b511e..5f16da90 100644 --- a/Test/cloudmake/CloudMake-CachedBuilds.dfy +++ b/Test/cloudmake/CloudMake-CachedBuilds.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // This module proves the correctness of the algorithms. It leaves a number of things undefined. diff --git a/Test/cloudmake/CloudMake-ConsistentBuilds.dfy b/Test/cloudmake/CloudMake-ConsistentBuilds.dfy index 6d86607b..c2fa4205 100644 --- a/Test/cloudmake/CloudMake-ConsistentBuilds.dfy +++ b/Test/cloudmake/CloudMake-ConsistentBuilds.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" /******* State *******/ diff --git a/Test/cloudmake/CloudMake-ParallelBuilds.dfy b/Test/cloudmake/CloudMake-ParallelBuilds.dfy index 07cae317..5cc70994 100644 --- a/Test/cloudmake/CloudMake-ParallelBuilds.dfy +++ b/Test/cloudmake/CloudMake-ParallelBuilds.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // This module proves the correctness of the algorithms. It leaves a number of things undefined. 
diff --git a/Test/dafny0/Basics.dfy b/Test/dafny0/Basics.dfy index 89b0f02a..7b8b632b 100644 --- a/Test/dafny0/Basics.dfy +++ b/Test/dafny0/Basics.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" class Global { diff --git a/Test/dafny0/Calculations.dfy b/Test/dafny0/Calculations.dfy index a7c8e06c..eb4ff1b9 100644 --- a/Test/dafny0/Calculations.dfy +++ b/Test/dafny0/Calculations.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint.dfy" "%s" > "%t" +// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint.dfy" /autoTriggers:0 "%s" > "%t" // RUN: %dafny /noVerify /compile:0 "%t.dprint.dfy" >> "%t" // RUN: %diff "%s.expect" "%t" diff --git a/Test/dafny0/Compilation.dfy b/Test/dafny0/Compilation.dfy index 7a443e47..213ace54 100644 --- a/Test/dafny0/Compilation.dfy +++ b/Test/dafny0/Compilation.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:3 "%s" > "%t" +// RUN: %dafny /compile:3 /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // The tests in this file are designed to run through the compiler. They contain diff --git a/Test/dafny0/ForallCompilation.dfy b/Test/dafny0/ForallCompilation.dfy index c812983a..4d89f70d 100644 --- a/Test/dafny0/ForallCompilation.dfy +++ b/Test/dafny0/ForallCompilation.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:3 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" method Main() { diff --git a/Test/dafny0/Fuel.dfy b/Test/dafny0/Fuel.dfy index 6347e134..a768db02 100644 --- a/Test/dafny0/Fuel.dfy +++ b/Test/dafny0/Fuel.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" module TestModule1 { diff --git a/Test/dafny0/LetExpr.dfy b/Test/dafny0/LetExpr.dfy index 000fce53..6a0ca66b 100644 --- a/Test/dafny0/LetExpr.dfy +++ b/Test/dafny0/LetExpr.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint.dfy" "%s" > "%t" +// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint.dfy" /autoTriggers:0 "%s" > "%t" // RUN: %dafny /noVerify /compile:0 "%t.dprint.dfy" >> "%t" // RUN: %diff "%s.expect" "%t" diff --git a/Test/dafny0/LetExpr.dfy.expect b/Test/dafny0/LetExpr.dfy.expect index f0f51274..8f365da3 100644 --- a/Test/dafny0/LetExpr.dfy.expect +++ b/Test/dafny0/LetExpr.dfy.expect @@ -35,5 +35,6 @@ Execution trace: (0,0): anon10_Then Dafny program verifier finished with 39 verified, 9 errors +LetExpr.dfy.tmp.dprint.dfy(162,2): Warning: /!\ No terms found to trigger on. 
Dafny program verifier finished with 0 verified, 0 errors diff --git a/Test/dafny0/LhsDuplicates.dfy b/Test/dafny0/LhsDuplicates.dfy index 6a84c5a5..8a57f6ce 100644 --- a/Test/dafny0/LhsDuplicates.dfy +++ b/Test/dafny0/LhsDuplicates.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" class MyClass { diff --git a/Test/dafny0/Parallel.dfy b/Test/dafny0/Parallel.dfy index 93a16475..00a1514c 100644 --- a/Test/dafny0/Parallel.dfy +++ b/Test/dafny0/Parallel.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /print:"%t.print" /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" class C { diff --git a/Test/dafny0/SmallTests.dfy.expect b/Test/dafny0/SmallTests.dfy.expect index 6161c3dd..746e978a 100644 --- a/Test/dafny0/SmallTests.dfy.expect +++ b/Test/dafny0/SmallTests.dfy.expect @@ -197,5 +197,6 @@ Execution trace: (0,0): anon0 Dafny program verifier finished with 104 verified, 35 errors +SmallTests.dfy.tmp.dprint.dfy(369,4): Warning: /!\ No trigger covering all quantified variables found. Dafny program verifier finished with 0 verified, 0 errors diff --git a/Test/dafny1/MoreInduction.dfy b/Test/dafny1/MoreInduction.dfy index bd654db5..2b5187a4 100644 --- a/Test/dafny1/MoreInduction.dfy +++ b/Test/dafny1/MoreInduction.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" datatype List = Nil | Cons(Node, List) diff --git a/Test/dafny1/SchorrWaite-stages.dfy b/Test/dafny1/SchorrWaite-stages.dfy index 0eaed68c..a6e5e3aa 100644 --- a/Test/dafny1/SchorrWaite-stages.dfy +++ b/Test/dafny1/SchorrWaite-stages.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Schorr-Waite algorithms, written and verified in Dafny. 
diff --git a/Test/dafny1/SchorrWaite.dfy b/Test/dafny1/SchorrWaite.dfy index 50210eb1..b0877f9f 100644 --- a/Test/dafny1/SchorrWaite.dfy +++ b/Test/dafny1/SchorrWaite.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Rustan Leino diff --git a/Test/dafny1/Substitution.dfy b/Test/dafny1/Substitution.dfy index da64d004..b9c83aff 100644 --- a/Test/dafny1/Substitution.dfy +++ b/Test/dafny1/Substitution.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" datatype List = Nil | Cons(Expr, List) diff --git a/Test/dafny1/UltraFilter.dfy b/Test/dafny1/UltraFilter.dfy index a32e6e0b..7ac4e749 100644 --- a/Test/dafny1/UltraFilter.dfy +++ b/Test/dafny1/UltraFilter.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // ultra filter diff --git a/Test/dafny2/SnapshotableTrees.dfy b/Test/dafny2/SnapshotableTrees.dfy index 2bdfb83b..033c5db0 100644 --- a/Test/dafny2/SnapshotableTrees.dfy +++ b/Test/dafny2/SnapshotableTrees.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:2 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:2 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Rustan Leino, September 2011. diff --git a/Test/dafny3/Filter.dfy b/Test/dafny3/Filter.dfy index 4f8b35ec..7473a580 100644 --- a/Test/dafny3/Filter.dfy +++ b/Test/dafny3/Filter.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" codatatype Stream = Cons(head: T, tail: Stream) diff --git a/Test/dafny4/GHC-MergeSort.dfy b/Test/dafny4/GHC-MergeSort.dfy index 976b8a27..24903d87 100644 --- a/Test/dafny4/GHC-MergeSort.dfy +++ b/Test/dafny4/GHC-MergeSort.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // Rustan Leino diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy index 0d6cffa1..c15f4987 100644 --- a/Test/dafny4/NumberRepresentations.dfy +++ b/Test/dafny4/NumberRepresentations.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" // We consider a number representation that consists of a sequence of digits. The least diff --git a/Test/dafny4/Primes.dfy b/Test/dafny4/Primes.dfy index fd64b45e..0c2a64dd 100644 --- a/Test/dafny4/Primes.dfy +++ b/Test/dafny4/Primes.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" predicate IsPrime(n: int) diff --git a/Test/server/simple-session.transcript.expect b/Test/server/simple-session.transcript.expect index 1aadca7f..a5f841bc 100644 --- a/Test/server/simple-session.transcript.expect +++ b/Test/server/simple-session.transcript.expect @@ -346,6 +346,7 @@ transcript(10,27): Error: invalid UnaryExpression Verification completed successfully! 
[SUCCESS] [[DAFNY-SERVER: EOM]] transcript(5,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -366,6 +367,7 @@ Execution trace: Verification completed successfully! [SUCCESS] [[DAFNY-SERVER: EOM]] transcript(5,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -383,6 +385,7 @@ Verifying Impl$$_module.__default.M_k ... Verification completed successfully! [SUCCESS] [[DAFNY-SERVER: EOM]] transcript(5,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -406,6 +409,7 @@ transcript(12,0): Error: invalid UpdateStmt Verification completed successfully! [SUCCESS] [[DAFNY-SERVER: EOM]] transcript(5,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -423,6 +427,7 @@ Verifying Impl$$_module.__default.M_k ... Verification completed successfully! [SUCCESS] [[DAFNY-SERVER: EOM]] transcript(5,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -498,6 +503,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -538,6 +547,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. 
+transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -584,6 +597,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -638,6 +655,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -685,6 +706,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -729,6 +754,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. 
+transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -796,6 +825,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -861,6 +894,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... @@ -905,6 +942,10 @@ transcript(5,0): Warning: module-level methods are always non-instance, so the ' transcript(15,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(24,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here transcript(33,0): Warning: module-level methods are always non-instance, so the 'static' keyword is not allowed here +transcript(10,9): Warning: /!\ No terms found to trigger on. +transcript(20,9): Warning: /!\ No terms found to trigger on. +transcript(29,9): Warning: /!\ No terms found to trigger on. +transcript(38,9): Warning: /!\ No terms found to trigger on. Verifying CheckWellformed$$_module.__default.A ... Retrieving cached verification result for implementation CheckWellformed$$_module.__default.A... diff --git a/Test/vstte2012/BreadthFirstSearch.dfy b/Test/vstte2012/BreadthFirstSearch.dfy index b111a438..375f4a09 100644 --- a/Test/vstte2012/BreadthFirstSearch.dfy +++ b/Test/vstte2012/BreadthFirstSearch.dfy @@ -1,4 +1,4 @@ -// RUN: %dafny /compile:0 /dprint:"%t.dprint" /vcsMaxKeepGoingSplits:10 "%s" > "%t" +// RUN: %dafny /compile:0 /dprint:"%t.dprint" /vcsMaxKeepGoingSplits:10 /autoTriggers:0 "%s" > "%t" // RUN: %diff "%s.expect" "%t" class BreadthFirstSearch -- cgit v1.2.3
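
For context on what the mechanical edits in this commit actually change: with /autoTriggers:1, now the default, Dafny attaches a {:trigger} annotation to each user-level quantifier when it can find candidate trigger terms, and emits the "No terms found to trigger on" warning (visible in the updated .expect files) when it cannot. The sketch below is purely illustrative and is not part of any patch in this series; the predicate and lemma names are invented, and the exact trigger Dafny selects can differ between versions.

  // RUN: %dafny /compile:0 "%s" > "%t"
  // RUN: %diff "%s.expect" "%t"

  // An uninterpreted predicate, only there to give the quantifier a term to match on.
  predicate P(x: int)

  lemma AutoTriggerFound(s: seq<int>)
    // autoTriggers:1 can infer a trigger here, e.g. {s[i]} or {P(s[i])}.
    requires forall i :: 0 <= i < |s| ==> P(s[i])
  {
  }

  lemma NoTriggerTerms(a: int, b: int)
    // The quantifier body is pure arithmetic, so there is nothing to trigger on;
    // with autoTriggers:1 this is where a "/!\ No terms found to trigger on." warning appears.
    requires forall x :: a < x < b ==> a < x
  {
  }

Tests whose proofs or expected output depend on the old, trigger-free encoding keep the previous behavior by adding /autoTriggers:0 to their RUN line, which is exactly the change applied to the test files in this commit.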