author    leino <unknown>    2015-08-12 22:44:50 -0700
committer leino <unknown>    2015-08-12 22:44:50 -0700
commit    f28fa68497352ffb57ab2846da4cc1c1aeaf1968 (patch)
tree      4eaf17362df86914266669a238e50028a478dc2e
parent    41d16e5a28b4eab7fb56f04c76759f8e24678b74 (diff)
Change the induction heuristic for lemmas to also look in the precondition for clues about which parameters to include.
As a result, many test cases improved (see, e.g., dafny4/NipkowKlein-chapter7.dfy!) and some others were beautified.
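To see the effect, here is a minimal sketch in the style of the updated Test/dafny0/InductivePredicates.dfy (the declarations Even and EvenMod2 are illustrative, not part of the commit). The depth parameter k occurs only in the precondition; the old heuristic searched only the postconditions and so never picked k as an induction variable, whereas with this change an empty proof body suffices:

inductive predicate Even(x: nat)
{
  x == 0 || (2 <= x && Even(x - 2))
}

lemma EvenMod2(k: nat, x: nat)
  requires Even#[k](x)  // prefix predicate: Even unrolled at most k times
  ensures x % 2 == 0
{
  // empty body: k appears in the precondition, which now counts as a clue,
  // so the automatically supplied induction hypothesis (applied at k-1, x-2)
  // carries the proof
}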
-rw-r--r--  Source/Dafny/Printer.cs                       3
-rw-r--r--  Source/Dafny/Rewriter.cs                     23
-rw-r--r--  Test/VerifyThis2015/Problem1.dfy              8
-rw-r--r--  Test/dafny0/InductivePredicates.dfy          41
-rw-r--r--  Test/dafny0/InductivePredicates.dfy.expect    6
-rw-r--r--  Test/dafny0/Parallel.dfy                    146
-rw-r--r--  Test/dafny3/Filter.dfy                       16
-rw-r--r--  Test/dafny3/GenericSort.dfy                  56
-rw-r--r--  Test/dafny3/InfiniteTrees.dfy                44
-rw-r--r--  Test/dafny4/ACL2-extractor.dfy                8
-rw-r--r--  Test/dafny4/KozenSilva.dfy                   80
-rw-r--r--  Test/dafny4/KozenSilva.dfy.expect             2
-rw-r--r--  Test/dafny4/NipkowKlein-chapter7.dfy         46
-rw-r--r--  Test/dafny4/NumberRepresentations.dfy        13
-rw-r--r--  Test/vstte2012/Tree.dfy                      32
15 files changed, 236 insertions, 288 deletions
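For orientation before the diff itself: induction inference is gated both globally and per declaration. The level meanings below are read off the Rewriter.cs hunk that follows, not from documentation:

// DafnyOptions.O.Induction, as consumed by ComputeInductionVariables:
//   < 2  : never infer induction variables
//   == 2 : infer only for quantifiers, not for lemmas
//   else : infer for quantifiers and for lemmas (the case this commit improves)
// Per-declaration override, used in the updated tests to keep manual proofs manual:
lemma {:induction false} M'(k: nat, x: natinf)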
diff --git a/Source/Dafny/Printer.cs b/Source/Dafny/Printer.cs
index 1f63d769..6a6a76ba 100644
--- a/Source/Dafny/Printer.cs
+++ b/Source/Dafny/Printer.cs
@@ -438,6 +438,7 @@ namespace Microsoft.Dafny {
public void PrintAttributes(Attributes a) {
if (a != null) {
PrintAttributes(a.Prev);
+ wr.Write(" ");
PrintOneAttribute(a);
}
}
@@ -445,7 +446,7 @@ namespace Microsoft.Dafny {
Contract.Requires(a != null);
var name = nameSubstitution ?? a.Name;
var usAttribute = name.StartsWith("_");
- wr.Write(" {1}{{:{0}", name, usAttribute ? "/*" : "");
+ wr.Write("{1}{{:{0}", name, usAttribute ? "/*" : "");
if (a.Args != null) {
PrintAttributeArgs(a.Args, false);
}
diff --git a/Source/Dafny/Rewriter.cs b/Source/Dafny/Rewriter.cs
index 4ac709f6..f107a819 100644
--- a/Source/Dafny/Rewriter.cs
+++ b/Source/Dafny/Rewriter.cs
@@ -1336,25 +1336,25 @@ namespace Microsoft.Dafny
}
void ComputeLemmaInduction(Method method) {
Contract.Requires(method != null);
- if (method.IsGhost && method.Mod.Expressions.Count == 0 && method.Outs.Count == 0 && !(method is FixpointLemma)) {
- var posts = new List<Expression>();
- method.Ens.ForEach(mfe => posts.Add(mfe.E));
- ComputeInductionVariables(method.tok, method.Ins, posts, ref method.Attributes);
+ if (method.Body != null && method.IsGhost && method.Mod.Expressions.Count == 0 && method.Outs.Count == 0 && !(method is FixpointLemma)) {
+ var specs = new List<Expression>();
+ method.Req.ForEach(mfe => specs.Add(mfe.E));
+ method.Ens.ForEach(mfe => specs.Add(mfe.E));
+ ComputeInductionVariables(method.tok, method.Ins, specs, method, ref method.Attributes);
}
}
- void ComputeInductionVariables<VarType>(IToken tok, List<VarType> boundVars, List<Expression> searchExprs, ref Attributes attributes) where VarType : class, IVariable {
+ void ComputeInductionVariables<VarType>(IToken tok, List<VarType> boundVars, List<Expression> searchExprs, Method lemma, ref Attributes attributes) where VarType : class, IVariable {
Contract.Requires(tok != null);
Contract.Requires(boundVars != null);
Contract.Requires(searchExprs != null);
Contract.Requires(DafnyOptions.O.Induction != 0);
- bool forLemma = boundVars is List<Formal>;
var args = Attributes.FindExpressions(attributes, "induction"); // we only look at the first one we find, since it overrides any other ones
if (args == null) {
if (DafnyOptions.O.Induction < 2) {
// No explicit induction variables and we're asked not to infer anything, so we're done
return;
- } else if (DafnyOptions.O.Induction == 2 && forLemma) {
+ } else if (DafnyOptions.O.Induction == 2 && lemma != null) {
// We're asked to infer induction variables only for quantifiers, not for lemmas
return;
}
@@ -1384,13 +1384,13 @@ namespace Microsoft.Dafny
}
if (0 <= boundVars.FindIndex(v => v == ie.Var)) {
Resolver.Warning(arg.tok, "{0}s given as :induction arguments must be given in the same order as in the {1}; ignoring attribute",
- forLemma ? "lemma parameter" : "bound variable", forLemma ? "lemma" : "quantifier");
+ lemma != null ? "lemma parameter" : "bound variable", lemma != null ? "lemma" : "quantifier");
return;
}
}
Resolver.Warning(arg.tok, "invalid :induction attribute argument; expected {0}{1}; ignoring attribute",
i == 0 ? "'false' or 'true' or " : "",
- forLemma ? "lemma parameter" : "bound variable");
+ lemma != null ? "lemma parameter" : "bound variable");
return;
}
// The argument list was legal, so let's use it for the _induction attribute
@@ -1410,6 +1410,9 @@ namespace Microsoft.Dafny
attributes = new Attributes("_induction", inductionVariables, attributes);
// And since we're inferring something, let's also report that in a hover text.
var s = Printer.OneAttributeToString(attributes, "induction");
+ if (lemma is PrefixLemma) {
+ s = lemma.Name + " " + s;
+ }
Resolver.ReportAdditionalInformation(tok, s);
}
}
@@ -1423,7 +1426,7 @@ namespace Microsoft.Dafny
protected override void VisitOneExpr(Expression expr) {
var q = expr as QuantifierExpr;
if (q != null) {
- IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, new List<Expression>() { q.LogicalBody() }, ref q.Attributes);
+ IndRewriter.ComputeInductionVariables(q.tok, q.BoundVars, new List<Expression>() { q.LogicalBody() }, null, ref q.Attributes);
}
}
void VisitInductionStmt(Statement stmt) {
diff --git a/Test/VerifyThis2015/Problem1.dfy b/Test/VerifyThis2015/Problem1.dfy
index 2e8a5243..67eeba07 100644
--- a/Test/VerifyThis2015/Problem1.dfy
+++ b/Test/VerifyThis2015/Problem1.dfy
@@ -161,18 +161,13 @@ lemma Same2<T>(pat: seq<T>, a: seq<T>)
ensures IRP_Alt(pat, a)
{
if pat == [] {
- assert pat <= a;
} else if a != [] && pat[0] == a[0] {
- assert IsRelaxedPrefixAux(pat[1..], a[1..], 1);
- Same2(pat[1..], a[1..]);
if pat[1..] <= a[1..] {
- assert pat <= a;
} else {
var k :| 0 <= k < |pat[1..]| && pat[1..][..k] + pat[1..][k+1..] <= a[1..];
assert 0 <= k+1 < |pat| && pat[..k+1] + pat[k+2..] <= a;
}
} else {
- assert IsRelaxedPrefixAux(pat[1..], a, 0);
Same2_Prefix(pat[1..], a);
assert pat[1..] <= a;
assert 0 <= 0 < |pat| && pat[..0] + pat[0+1..] <= a;
@@ -182,7 +177,4 @@ lemma Same2_Prefix<T>(pat: seq<T>, a: seq<T>)
requires IsRelaxedPrefixAux(pat, a, 0)
ensures pat <= a
{
- if pat != [] {
- Same2_Prefix(pat[1..], a[1..]);
- }
}
diff --git a/Test/dafny0/InductivePredicates.dfy b/Test/dafny0/InductivePredicates.dfy
index 424118e7..8d05af11 100644
--- a/Test/dafny0/InductivePredicates.dfy
+++ b/Test/dafny0/InductivePredicates.dfy
@@ -18,7 +18,7 @@ lemma M(x: natinf)
}
// yay! my first proof involving an inductive predicate :)
-lemma M'(k: nat, x: natinf)
+lemma {:induction false} M'(k: nat, x: natinf)
requires Even#[k](x)
ensures x.N? && x.n % 2 == 0
{
@@ -32,8 +32,14 @@ lemma M'(k: nat, x: natinf)
}
}
+lemma M'_auto(k: nat, x: natinf)
+ requires Even#[k](x)
+ ensures x.N? && x.n % 2 == 0
+{
+}
+
// Here is the same proof as in M / M', but packaged into a single "inductive lemma":
-inductive lemma IL(x: natinf)
+inductive lemma {:induction false} IL(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -45,7 +51,7 @@ inductive lemma IL(x: natinf)
}
}
-inductive lemma IL_EvenBetter(x: natinf)
+inductive lemma {:induction false} IL_EvenBetter(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -57,6 +63,12 @@ inductive lemma IL_EvenBetter(x: natinf)
}
}
+inductive lemma IL_Best(x: natinf)
+ requires Even(x)
+ ensures x.N? && x.n % 2 == 0
+{
+}
+
inductive lemma IL_Bad(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
@@ -107,7 +119,7 @@ module Alt {
{
match x
case N(n) => N(n+1)
- case Inf => Inf
+ case Inf => Inf
}
inductive predicate Even(x: natinf)
@@ -116,7 +128,7 @@ module Alt {
exists y :: x == S(S(y)) && Even(y)
}
- inductive lemma MyLemma_NotSoNice(x: natinf)
+ inductive lemma {:induction false} MyLemma_NotSoNice(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -130,7 +142,7 @@ module Alt {
}
}
- inductive lemma MyLemma_NiceButNotFast(x: natinf)
+ inductive lemma {:induction false} MyLemma_NiceButNotFast(x: natinf)
requires Even(x)
ensures x.N? && x.n % 2 == 0
{
@@ -143,7 +155,13 @@ module Alt {
assert x.n == y.n + 2;
}
}
-
+
+ inductive lemma MyLemma_RealNice_AndFastToo(x: natinf)
+ requires Even(x)
+ ensures x.N? && x.n % 2 == 0
+ {
+ }
+
lemma InfNotEven()
ensures !Even(Inf)
{
@@ -156,15 +174,6 @@ module Alt {
requires Even(Inf)
ensures false
{
- var x := Inf;
- if {
- case x.N? && x.n == 0 =>
- assert false; // this case is absurd
- case exists y :: x == S(S(y)) && Even(y) =>
- var y :| x == S(S(y)) && Even(y);
- assert y == Inf;
- InfNotEven_Aux();
- }
}
lemma NextEven(x: natinf)
diff --git a/Test/dafny0/InductivePredicates.dfy.expect b/Test/dafny0/InductivePredicates.dfy.expect
index ccf30643..48beade5 100644
--- a/Test/dafny0/InductivePredicates.dfy.expect
+++ b/Test/dafny0/InductivePredicates.dfy.expect
@@ -1,9 +1,9 @@
-InductivePredicates.dfy(64,9): Error: assertion violation
+InductivePredicates.dfy(76,9): Error: assertion violation
Execution trace:
(0,0): anon0
(0,0): anon3_Then
-InductivePredicates.dfy(76,10): Error: assertion violation
+InductivePredicates.dfy(88,10): Error: assertion violation
Execution trace:
(0,0): anon0
-Dafny program verifier finished with 29 verified, 2 errors
+Dafny program verifier finished with 35 verified, 2 errors
diff --git a/Test/dafny0/Parallel.dfy b/Test/dafny0/Parallel.dfy
index 030eb350..e0d6491b 100644
--- a/Test/dafny0/Parallel.dfy
+++ b/Test/dafny0/Parallel.dfy
@@ -2,13 +2,13 @@
// RUN: %diff "%s.expect" "%t"
class C {
- var data: int;
- var n: nat;
- var st: set<object>;
+ var data: int
+ var n: nat
+ var st: set<object>
ghost method CLemma(k: int)
- requires k != -23;
- ensures data < k; // magic, isn't it (or bogus, some would say)
+ requires k != -23
+ ensures data < k // magic, isn't it (or bogus, some would say)
}
// This method more or less just tests the syntax, resolution, and basic verification
@@ -19,31 +19,31 @@ method ParallelStatement_Resolve(
S: set<int>,
clx: C, cly: C, clk: int
)
- requires a != null && null !in spine;
- modifies a, spine;
+ requires a != null && null !in spine
+ modifies a, spine
{
- forall (i: int | 0 <= i < a.Length && i % 2 == 0) {
+ forall i | 0 <= i < a.Length && i % 2 == 0 {
a[i] := a[(i + 1) % a.Length] + 3;
}
- forall (o | o in spine) {
+ forall o | o in spine {
o.st := o.st + Repr;
}
- forall (x, y | x in S && 0 <= y+x < 100) {
+ forall x, y | x in S && 0 <= y+x < 100 {
Lemma(clx, x, y); // error: precondition does not hold (clx may be null)
}
- forall (x, y | x in S && 0 <= y+x < 100) {
+ forall x, y | x in S && 0 <= y+x < 100 {
cly.CLemma(x + y); // error: receiver might be null
}
- forall (p | 0 <= p)
- ensures F(p) <= Sum(p) + p - 1; // error (no connection is known between F and Sum)
+ forall p | 0 <= p
+ ensures F(p) <= Sum(p) + p - 1 // error (no connection is known between F and Sum)
{
assert 0 <= G(p);
ghost var t;
- if (p % 2 == 0) {
+ if p % 2 == 0 {
assert G(p) == F(p+2); // error (there's nothing that gives any relation between F and G)
t := p+p;
} else {
@@ -56,11 +56,11 @@ method ParallelStatement_Resolve(
}
}
-ghost method Lemma(c: C, x: int, y: int)
- requires c != null;
- ensures c.data <= x+y;
-ghost method PowerLemma(x: int, y: int)
- ensures Pred(x, y);
+lemma Lemma(c: C, x: int, y: int)
+ requires c != null
+ ensures c.data <= x+y
+lemma PowerLemma(x: int, y: int)
+ ensures Pred(x, y)
function F(x: int): int
function G(x: int): nat
@@ -71,54 +71,54 @@ function Pred(x: int, y: int): bool
// ---------------------------------------------------------------------
method M0(S: set<C>)
- requires null !in S;
- modifies S;
- ensures forall o :: o in S ==> o.data == 85;
- ensures forall o :: o != null && o !in S ==> o.data == old(o.data);
+ requires null !in S
+ modifies S
+ ensures forall o :: o in S ==> o.data == 85
+ ensures forall o :: o != null && o !in S ==> o.data == old(o.data)
{
- forall (s | s in S) {
+ forall s | s in S {
s.data := 85;
}
}
method M1(S: set<C>, x: C)
- requires null !in S && x in S;
+ requires null !in S && x in S
{
- forall (s | s in S)
- ensures s.data < 100;
+ forall s | s in S
+ ensures s.data < 100
{
assume s.data == 85;
}
- if (*) {
+ if * {
assert x.data == 85; // error (cannot be inferred from forall ensures clause)
} else {
assert x.data < 120;
}
- forall (s | s in S)
- ensures s.data < 70; // error
+ forall s | s in S
+ ensures s.data < 70 // error
{
assume s.data == 85;
}
}
method M2() returns (a: array<int>)
- ensures a != null;
- ensures forall i,j :: 0 <= i < a.Length/2 <= j < a.Length ==> a[i] < a[j];
+ ensures a != null
+ ensures forall i,j :: 0 <= i < a.Length/2 <= j < a.Length ==> a[i] < a[j]
{
a := new int[250];
- forall (i: nat | i < 125) {
+ forall i: nat | i < 125 {
a[i] := 423;
}
- forall (i | 125 <= i < 250) {
+ forall i | 125 <= i < 250 {
a[i] := 300 + i;
}
}
method M4(S: set<C>, k: int)
- modifies S;
+ modifies S
{
- forall (s | s in S && s != null) {
+ forall s | s in S && s != null {
s.n := k; // error: k might be negative
}
}
@@ -127,25 +127,25 @@ method M5()
{
if {
case true =>
- forall (x | 0 <= x < 100) {
+ forall x | 0 <= x < 100 {
PowerLemma(x, x);
}
assert Pred(34, 34);
case true =>
- forall (x,y | 0 <= x < 100 && y == x+1) {
+ forall x,y | 0 <= x < 100 && y == x+1 {
PowerLemma(x, y);
}
assert Pred(34, 35);
case true =>
- forall (x,y | 0 <= x < y < 100) {
+ forall x,y | 0 <= x < y < 100 {
PowerLemma(x, y);
}
assert Pred(34, 35);
case true =>
- forall (x | x in set k | 0 <= k < 100) {
+ forall x | x in set k | 0 <= k < 100 {
PowerLemma(x, x);
}
assert Pred(34, 34);
@@ -155,22 +155,22 @@ method M5()
method Main()
{
var a := new int[180];
- forall (i | 0 <= i < 180) {
+ forall i | 0 <= i < 180 {
a[i] := 2*i + 100;
}
var sq := [0, 0, 0, 2, 2, 2, 5, 5, 5];
- forall (i | 0 <= i < |sq|) {
+ forall i | 0 <= i < |sq| {
a[20+i] := sq[i];
}
- forall (t | t in sq) {
+ forall t | t in sq {
a[t] := 1000;
}
- forall (t,u | t in sq && t < 4 && 10 <= u < 10+t) {
+ forall t,u | t in sq && t < 4 && 10 <= u < 10+t {
a[u] := 6000 + t;
}
var k := 0;
- while (k < 180) {
- if (k != 0) { print ", "; }
+ while k < 180 {
+ if k != 0 { print ", "; }
print a[k];
k := k + 1;
}
@@ -180,50 +180,50 @@ method Main()
method DuplicateUpdate() {
var a := new int[180];
var sq := [0, 0, 0, 2, 2, 2, 5, 5, 5];
- if (*) {
- forall (t,u | t in sq && 10 <= u < 10+t) {
+ if * {
+ forall t,u | t in sq && 10 <= u < 10+t {
a[u] := 6000 + t; // error: a[10] (and a[11]) are assigned more than once
}
} else {
- forall (t,u | t in sq && t < 4 && 10 <= u < 10+t) {
+ forall t,u | t in sq && t < 4 && 10 <= u < 10+t {
a[u] := 6000 + t; // with the 't < 4' conjunct in the line above, this is fine
}
}
}
-ghost method DontDoMuch(x: int)
+lemma DontDoMuch(x: int)
{
}
method OmittedRange() {
- forall (x: int) { } // a type is still needed for the bound variable
- forall (x) {
+ forall x: int { } // a type is still needed for the bound variable
+ forall x {
DontDoMuch(x);
}
}
// ----------------------- two-state postconditions ---------------------------------
-class TwoState_C { ghost var data: int; }
+class TwoState_C { ghost var data: int }
// It is not possible to achieve this postcondition in a ghost method, because ghost
// contexts are not allowed to allocate state. Callers of this ghost method will know
// that the postcondition is tantamount to 'false'.
ghost method TwoState0(y: int)
- ensures exists o: TwoState_C :: o != null && fresh(o);
+ ensures exists o: TwoState_C :: o != null && fresh(o)
method TwoState_Main0() {
- forall (x) { TwoState0(x); }
+ forall x { TwoState0(x); }
assert false; // no prob, because the postcondition of TwoState0 implies false
}
method X_Legit(c: TwoState_C)
- requires c != null;
- modifies c;
+ requires c != null
+ modifies c
{
c.data := c.data + 1;
- forall (x | c.data <= x)
- ensures old(c.data) < x; // note that the 'old' refers to the method's initial state
+ forall x | c.data <= x
+ ensures old(c.data) < x // note that the 'old' refers to the method's initial state
{
}
}
@@ -235,8 +235,8 @@ method X_Legit(c: TwoState_C)
// method, not the beginning of the 'forall' statement.
method TwoState_Main2()
{
- forall (x: int)
- ensures exists o: TwoState_C :: o != null && fresh(o);
+ forall x: int
+ ensures exists o: TwoState_C :: o != null && fresh(o)
{
TwoState0(x);
}
@@ -251,8 +251,8 @@ method TwoState_Main2()
// statement's effect on the heap is not optimized away.
method TwoState_Main3()
{
- forall (x: int)
- ensures exists o: TwoState_C :: o != null && fresh(o);
+ forall x: int
+ ensures exists o: TwoState_C :: o != null && fresh(o)
{
assume false; // (there's no other way to achieve this forall-statement postcondition)
}
@@ -262,11 +262,11 @@ method TwoState_Main3()
// ------- empty forall statement -----------------------------------------
class EmptyForallStatement {
- var emptyPar: int;
+ var emptyPar: int
method Empty_Parallel0()
- modifies this;
- ensures emptyPar == 8;
+ modifies this
+ ensures emptyPar == 8
{
forall () {
this.emptyPar := 8;
@@ -274,11 +274,11 @@ class EmptyForallStatement {
}
function EmptyPar_P(x: int): bool
- ghost method EmptyPar_Lemma(x: int)
- ensures EmptyPar_P(x);
+ lemma EmptyPar_Lemma(x: int)
+ ensures EmptyPar_P(x)
method Empty_Parallel1()
- ensures EmptyPar_P(8);
+ ensures EmptyPar_P(8)
{
forall {
EmptyPar_Lemma(8);
@@ -288,7 +288,7 @@ class EmptyForallStatement {
method Empty_Parallel2()
{
forall
- ensures exists k :: EmptyPar_P(k);
+ ensures exists k :: EmptyPar_P(k)
{
var y := 8;
assume EmptyPar_P(y);
@@ -312,9 +312,9 @@ predicate ThProperty(step: nat, t: Nat, r: nat)
case Succ(o) => step>0 && exists ro:nat :: ThProperty(step-1, o, ro)
}
-ghost method Th(step: nat, t: Nat, r: nat)
- requires t.Succ? && ThProperty(step, t, r);
+lemma Th(step: nat, t: Nat, r: nat)
+ requires t.Succ? && ThProperty(step, t, r)
// the next line follows from the precondition and the definition of ThProperty
- ensures exists ro:nat :: ThProperty(step-1, t.tail, ro);
+ ensures exists ro:nat :: ThProperty(step-1, t.tail, ro)
{
}
diff --git a/Test/dafny3/Filter.dfy b/Test/dafny3/Filter.dfy
index 24c8e94e..6e67de26 100644
--- a/Test/dafny3/Filter.dfy
+++ b/Test/dafny3/Filter.dfy
@@ -35,7 +35,7 @@ lemma Lemma_TailSubStreamK(s: Stream, u: Stream, k: nat) // this lemma could ha
{
if k != 0 {
Lemma_InTail(s.head, u);
- Lemma_TailSubStreamK(s.tail, u, k-1);
+ //Lemma_TailSubStreamK(s.tail, u, k-1);
}
}
lemma Lemma_InSubStream<T>(x: T, s: Stream<T>, u: Stream<T>)
@@ -193,10 +193,10 @@ lemma FS_Ping<T>(s: Stream<T>, h: PredicateHandle, x: T)
}
lemma FS_Pong<T>(s: Stream<T>, h: PredicateHandle, x: T, k: nat)
- requires AlwaysAnother(s, h) && In(x, s) && P(x, h);
- requires Tail(s, k).head == x;
- ensures In(x, Filter(s, h));
- decreases k;
+ requires AlwaysAnother(s, h) && In(x, s) && P(x, h)
+ requires Tail(s, k).head == x
+ ensures In(x, Filter(s, h))
+ decreases k
{
var fs := Filter(s, h);
if s.head == x {
@@ -205,14 +205,14 @@ lemma FS_Pong<T>(s: Stream<T>, h: PredicateHandle, x: T, k: nat)
assert fs == Cons(s.head, Filter(s.tail, h)); // reminder of where we are
calc {
true;
- == { FS_Pong(s.tail, h, x, k-1); }
+ //== { FS_Pong(s.tail, h, x, k-1); }
In(x, Filter(s.tail, h));
==> { assert fs.head != x; Lemma_InTail(x, fs); }
In(x, fs);
}
} else {
- assert fs == Filter(s.tail, h); // reminder of where we are
- FS_Pong(s.tail, h, x, k-1);
+ //assert fs == Filter(s.tail, h); // reminder of where we are
+ //FS_Pong(s.tail, h, x, k-1);
}
}
diff --git a/Test/dafny3/GenericSort.dfy b/Test/dafny3/GenericSort.dfy
index 7ea038be..6bd06965 100644
--- a/Test/dafny3/GenericSort.dfy
+++ b/Test/dafny3/GenericSort.dfy
@@ -7,25 +7,25 @@ abstract module TotalOrder {
// Three properties of total orders. Here, they are given as unproved
// lemmas, that is, as axioms.
lemma Antisymmetry(a: T, b: T)
- requires Leq(a, b) && Leq(b, a);
- ensures a == b;
+ requires Leq(a, b) && Leq(b, a)
+ ensures a == b
lemma Transitivity(a: T, b: T, c: T)
- requires Leq(a, b) && Leq(b, c);
- ensures Leq(a, c);
+ requires Leq(a, b) && Leq(b, c)
+ ensures Leq(a, c)
lemma Totality(a: T, b: T)
- ensures Leq(a, b) || Leq(b, a);
+ ensures Leq(a, b) || Leq(b, a)
}
abstract module Sort {
import O as TotalOrder // let O denote some module that has the members of TotalOrder
predicate Sorted(a: array<O.T>, low: int, high: int)
- requires a != null && 0 <= low <= high <= a.Length;
- reads a;
+ requires a != null && 0 <= low <= high <= a.Length
+ reads a
// The body of the predicate is hidden outside the module, but the postcondition is
// "exported" (that is, visible, known) outside the module. Here, we choose to
// export the following property:
- ensures Sorted(a, low, high) ==> forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j]);
+ ensures Sorted(a, low, high) ==> forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j])
{
forall i, j :: low <= i < j < high ==> O.Leq(a[i], a[j])
}
@@ -33,18 +33,18 @@ abstract module Sort {
// In the insertion sort routine below, it's more convenient to keep track of only that
// neighboring elements of the array are sorted...
predicate NeighborSorted(a: array<O.T>, low: int, high: int)
- requires a != null && 0 <= low <= high <= a.Length;
- reads a;
+ requires a != null && 0 <= low <= high <= a.Length
+ reads a
{
forall i :: low < i < high ==> O.Leq(a[i-1], a[i])
}
// ...but we show that property to imply all pairs to be sorted. The proof of this
// lemma uses the transitivity property.
lemma NeighborSorted_implies_Sorted(a: array<O.T>, low: int, high: int)
- requires a != null && 0 <= low <= high <= a.Length;
- requires NeighborSorted(a, low, high);
- ensures Sorted(a, low, high);
- decreases high - low;
+ requires a != null && 0 <= low <= high <= a.Length
+ requires NeighborSorted(a, low, high)
+ ensures Sorted(a, low, high)
+ decreases high - low
{
if low != high {
NeighborSorted_implies_Sorted(a, low+1, high);
@@ -57,25 +57,25 @@ abstract module Sort {
// Standard insertion sort method
method InsertionSort(a: array<O.T>)
- requires a != null;
- modifies a;
- ensures Sorted(a, 0, a.Length);
- ensures multiset(a[..]) == old(multiset(a[..]));
+ requires a != null
+ modifies a
+ ensures Sorted(a, 0, a.Length)
+ ensures multiset(a[..]) == old(multiset(a[..]))
{
if a.Length == 0 { return; }
var i := 1;
while i < a.Length
- invariant 0 < i <= a.Length;
- invariant NeighborSorted(a, 0, i);
- invariant multiset(a[..]) == old(multiset(a[..]));
+ invariant 0 < i <= a.Length
+ invariant NeighborSorted(a, 0, i)
+ invariant multiset(a[..]) == old(multiset(a[..]))
{
var j := i;
while 0 < j && !O.Leq(a[j-1], a[j])
- invariant 0 <= j <= i;
- invariant NeighborSorted(a, 0, j);
- invariant NeighborSorted(a, j, i+1);
- invariant 0 < j < i ==> O.Leq(a[j-1], a[j+1]);
- invariant multiset(a[..]) == old(multiset(a[..]));
+ invariant 0 <= j <= i
+ invariant NeighborSorted(a, 0, j)
+ invariant NeighborSorted(a, j, i+1)
+ invariant 0 < j < i ==> O.Leq(a[j-1], a[j+1])
+ invariant multiset(a[..]) == old(multiset(a[..]))
{
// The proof of correctness uses the totality property here.
// It implies that if two elements are not previously in
@@ -97,7 +97,7 @@ module IntOrder refines TotalOrder {
datatype T = Int(i: int)
// Define the ordering on these integers
predicate method Leq ...
- ensures Leq(a, b) ==> a.i <= b.i;
+ ensures Leq(a, b) ==> a.i <= b.i
{
a.i <= b.i
}
@@ -156,7 +156,7 @@ module intOrder refines TotalOrder {
type T = int
// Define the ordering on these integers
predicate method Leq ...
- ensures Leq(a, b) ==> a <= b;
+ ensures Leq(a, b) ==> a <= b
{
a <= b
}
diff --git a/Test/dafny3/InfiniteTrees.dfy b/Test/dafny3/InfiniteTrees.dfy
index 516a9e4e..85f73bc3 100644
--- a/Test/dafny3/InfiniteTrees.dfy
+++ b/Test/dafny3/InfiniteTrees.dfy
@@ -286,7 +286,7 @@ lemma Theorem1_Lemma(t: Tree, n: nat, p: Stream<int>)
LowerThan(ch, n);
==> // def. LowerThan
LowerThan(ch.head.children, n-1);
- ==> { Theorem1_Lemma(ch.head, n-1, tail); }
+ ==> //{ Theorem1_Lemma(ch.head, n-1, tail); }
!IsNeverEndingStream(tail);
==> // def. IsNeverEndingStream
!IsNeverEndingStream(p);
@@ -374,30 +374,30 @@ lemma Proposition3b()
}
}
lemma Proposition3b_Lemma(t: Tree, h: nat, p: Stream<int>)
- requires LowerThan(t.children, h) && ValidPath(t, p);
- ensures !IsNeverEndingStream(p);
- decreases h;
+ requires LowerThan(t.children, h) && ValidPath(t, p)
+ ensures !IsNeverEndingStream(p)
+ decreases h
{
match p {
case Nil =>
case Cons(index, tail) =>
// From the definition of ValidPath(t, p), we get the following:
var ch := Tail(t.children, index);
- assert ch.Cons? && ValidPath(ch.head, tail);
+ // assert ch.Cons? && ValidPath(ch.head, tail);
// From the definition of LowerThan(t.children, h), we get the following:
match t.children {
case Nil =>
ValidPath_Lemma(p);
assert false; // absurd case
case Cons(_, _) =>
- assert 1 <= h;
+ // assert 1 <= h;
LowerThan_Lemma(t.children, index, h);
- assert LowerThan(ch, h);
+ // assert LowerThan(ch, h);
}
// Putting these together, by ch.Cons? and the definition of LowerThan(ch, h), we get:
- assert LowerThan(ch.head.children, h-1);
+ // assert LowerThan(ch.head.children, h-1);
// And now we can invoke the induction hypothesis:
- Proposition3b_Lemma(ch.head, h-1, tail);
+ // Proposition3b_Lemma(ch.head, h-1, tail);
}
}
@@ -627,30 +627,10 @@ colemma Path_Lemma2'(p: Stream<int>)
}
}
colemma Path_Lemma2''(p: Stream<int>, n: nat, tail: Stream<int>)
- requires IsNeverEndingStream(p) && p.tail == tail;
- ensures InfinitePath'(S2N'(n, tail));
+ requires IsNeverEndingStream(p) && p.tail == tail
+ ensures InfinitePath'(S2N'(n, tail))
{
- if n <= 0 {
- calc {
- InfinitePath'#[_k](S2N'(n, tail));
- // def. S2N'
- InfinitePath'#[_k](Zero(S2N(tail)));
- // def. InfinitePath'
- InfinitePath#[_k-1](S2N(tail));
- { Path_Lemma2'(tail); }
- true;
- }
- } else {
- calc {
- InfinitePath'#[_k](S2N'(n, tail));
- // def. S2N'
- InfinitePath'#[_k](Succ(S2N'(n-1, tail)));
- // def. InfinitePath'
- InfinitePath'#[_k-1](S2N'(n-1, tail));
- { Path_Lemma2''(p, n-1, tail); }
- true;
- }
- }
+ Path_Lemma2'(tail);
}
lemma Path_Lemma3(r: CoOption<Number>)
ensures InfinitePath(r) ==> IsNeverEndingStream(N2S(r));
diff --git a/Test/dafny4/ACL2-extractor.dfy b/Test/dafny4/ACL2-extractor.dfy
index 8fe98531..bd2c9d83 100644
--- a/Test/dafny4/ACL2-extractor.dfy
+++ b/Test/dafny4/ACL2-extractor.dfy
@@ -116,9 +116,9 @@ lemma RevLength(xs: List)
// you can prove two lists equal by proving their elements equal
lemma EqualElementsMakeEqualLists(xs: List, ys: List)
- requires length(xs) == length(ys);
- requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys);
- ensures xs == ys;
+ requires length(xs) == length(ys)
+ requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys)
+ ensures xs == ys
{
match xs {
case Nil =>
@@ -132,7 +132,7 @@ lemma EqualElementsMakeEqualLists(xs: List, ys: List)
nth(i+1, xs) == nth(i+1, ys);
}
}
- EqualElementsMakeEqualLists(xs.tail, ys.tail);
+ // EqualElementsMakeEqualLists(xs.tail, ys.tail);
}
}
diff --git a/Test/dafny4/KozenSilva.dfy b/Test/dafny4/KozenSilva.dfy
index af0cdc71..ef49b10f 100644
--- a/Test/dafny4/KozenSilva.dfy
+++ b/Test/dafny4/KozenSilva.dfy
@@ -25,8 +25,8 @@ copredicate LexLess(s: Stream<int>, t: Stream<int>)
// A co-lemma is used to establish the truth of a co-predicate.
colemma Theorem1_LexLess_Is_Transitive(s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires LexLess(s, t) && LexLess(t, u);
- ensures LexLess(s, u);
+ requires LexLess(s, t) && LexLess(t, u)
+ ensures LexLess(s, u)
{
// Here is the proof, which is actually a body of code. It lends itself to a
// simple, intuitive co-inductive reading. For a theorem this simple, this simple
@@ -38,6 +38,14 @@ colemma Theorem1_LexLess_Is_Transitive(s: Stream<int>, t: Stream<int>, u: Stream
}
}
+// Actually, Dafny can do the proof of the previous lemma completely automatically. Here it is:
+colemma Theorem1_LexLess_Is_Transitive_Automatic(s: Stream<int>, t: Stream<int>, u: Stream<int>)
+ requires LexLess(s, t) && LexLess(t, u)
+ ensures LexLess(s, u)
+{
+ // no manual proof needed, so the body of the co-lemma is empty
+}
+
// The following predicate captures the (inductively defined) negation of (the
// co-inductively defined) LexLess above.
predicate NotLexLess(s: Stream<int>, t: Stream<int>)
@@ -51,7 +59,7 @@ predicate NotLexLess'(k: nat, s: Stream<int>, t: Stream<int>)
}
lemma EquivalenceTheorem(s: Stream<int>, t: Stream<int>)
- ensures LexLess(s, t) <==> !NotLexLess(s, t);
+ ensures LexLess(s, t) <==> !NotLexLess(s, t)
{
if !NotLexLess(s, t) {
EquivalenceTheorem0(s, t);
@@ -61,8 +69,8 @@ lemma EquivalenceTheorem(s: Stream<int>, t: Stream<int>)
}
}
colemma EquivalenceTheorem0(s: Stream<int>, t: Stream<int>)
- requires !NotLexLess(s, t);
- ensures LexLess(s, t);
+ requires !NotLexLess(s, t)
+ ensures LexLess(s, t)
{
// Here, more needs to be said about the way Dafny handles co-lemmas.
// The way a co-lemma establishes a co-predicate is to prove, by induction,
@@ -74,14 +82,14 @@ colemma EquivalenceTheorem0(s: Stream<int>, t: Stream<int>)
// indicates a finite unrolling of a co-inductive predicate. In particular,
// LexLess#[k] refers to k unrollings of LexLess.
lemma EquivalenceTheorem0_Lemma(k: nat, s: Stream<int>, t: Stream<int>)
- requires !NotLexLess'(k, s, t);
- ensures LexLess#[k](s, t);
+ requires !NotLexLess'(k, s, t)
+ ensures LexLess#[k](s, t)
{
// This simple inductive proof is done completely automatically by Dafny.
}
lemma EquivalenceTheorem1(s: Stream<int>, t: Stream<int>)
- requires LexLess(s, t);
- ensures !NotLexLess(s, t);
+ requires LexLess(s, t)
+ ensures !NotLexLess(s, t)
{
// The forall statement in Dafny is used, here, as universal introduction:
// what EquivalenceTheorem1_Lemma establishes for one k, the forall
@@ -91,22 +99,22 @@ lemma EquivalenceTheorem1(s: Stream<int>, t: Stream<int>)
}
}
lemma EquivalenceTheorem1_Lemma(k: nat, s: Stream<int>, t: Stream<int>)
- requires LexLess(s, t);
- ensures !NotLexLess'(k, s, t);
+ requires LexLess(s, t)
+ ensures !NotLexLess'(k, s, t)
{
}
lemma Theorem1_Alt(s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires NotLexLess(s, u);
- ensures NotLexLess(s, t) || NotLexLess(t, u);
+ requires NotLexLess(s, u)
+ ensures NotLexLess(s, t) || NotLexLess(t, u)
{
forall k: nat | NotLexLess'(k, s, u) {
Theorem1_Alt_Lemma(k, s, t, u);
}
}
lemma Theorem1_Alt_Lemma(k: nat, s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires NotLexLess'(k, s, u);
- ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u);
+ requires NotLexLess'(k, s, u)
+ ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u)
{
}
@@ -116,8 +124,8 @@ function PointwiseAdd(s: Stream<int>, t: Stream<int>): Stream<int>
}
colemma Theorem2_Pointwise_Addition_Is_Monotone(s: Stream<int>, t: Stream<int>, u: Stream<int>, v: Stream<int>)
- requires LexLess(s, t) && LexLess(u, v);
- ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v));
+ requires LexLess(s, t) && LexLess(u, v)
+ ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v))
{
// The co-lemma will establish the co-inductive predicate by establishing
// all finite unrollings thereof. Each finite unrolling is proved by
@@ -148,15 +156,9 @@ copredicate Subtype(a: RecType, b: RecType)
}
colemma Theorem3_Subtype_Is_Transitive(a: RecType, b: RecType, c: RecType)
- requires Subtype(a, b) && Subtype(b, c);
- ensures Subtype(a, c);
-{
- if a == Bottom || c == Top {
- // done
- } else {
- Theorem3_Subtype_Is_Transitive(c.dom, b.dom, a.dom);
- Theorem3_Subtype_Is_Transitive(a.ran, b.ran, c.ran);
- }
+ requires Subtype(a, b) && Subtype(b, c)
+ ensures Subtype(a, c)
+{
}
// --------------------------------------------------------------------------
@@ -184,16 +186,16 @@ copredicate ValBelow(u: Val, v: Val)
}
colemma Theorem4a_ClEnvBelow_Is_Transitive(c: ClEnv, d: ClEnv, e: ClEnv)
- requires ClEnvBelow(c, d) && ClEnvBelow(d, e);
- ensures ClEnvBelow(c, e);
+ requires ClEnvBelow(c, d) && ClEnvBelow(d, e)
+ ensures ClEnvBelow(c, e)
{
forall y | y in c.m {
Theorem4b_ValBelow_Is_Transitive#[_k-1](c.m[y], d.m[y], e.m[y]);
}
}
colemma Theorem4b_ValBelow_Is_Transitive(u: Val, v: Val, w: Val)
- requires ValBelow(u, v) && ValBelow(v, w);
- ensures ValBelow(u, w);
+ requires ValBelow(u, v) && ValBelow(v, w)
+ ensures ValBelow(u, w)
{
if u.ValCl? {
Theorem4a_ClEnvBelow_Is_Transitive(u.cl.env, v.cl.env, w.cl.env);
@@ -209,7 +211,7 @@ predicate IsCapsule(cap: Capsule)
}
function ClosureConversion(cap: Capsule): Cl
- requires IsCapsule(cap);
+ requires IsCapsule(cap)
{
Closure(cap.e.abs, ClosureConvertedMap(cap.s))
// In the Kozen and Silva paper, there are more conditions, having to do with free variables,
@@ -229,8 +231,8 @@ predicate CapsuleEnvironmentBelow(s: map<Var, ConstOrAbs>, t: map<Var, ConstOrAb
}
colemma Theorem5_ClosureConversion_Is_Monotone(s: map<Var, ConstOrAbs>, t: map<Var, ConstOrAbs>)
- requires CapsuleEnvironmentBelow(s, t);
- ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t));
+ requires CapsuleEnvironmentBelow(s, t)
+ ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t))
{
}
@@ -251,8 +253,8 @@ copredicate Bisim(s: Stream, t: Stream)
}
colemma Theorem6_Bisim_Is_Symmetric(s: Stream, t: Stream)
- requires Bisim(s, t);
- ensures Bisim(t, s);
+ requires Bisim(s, t)
+ ensures Bisim(t, s)
{
// proof is automatic
}
@@ -270,8 +272,8 @@ function merge(s: Stream, t: Stream): Stream
// In general, the termination argument needs to be supplied explicitly in terms
// of a metric, rank, variant function, or whatever you want to call it--a
// "decreases" clause in Dafny. Dafny provides some help in making up "decreases"
-// clauses, and in this case it automatically adds "decreases 0;" to SplitLeft
-// and "decreases 1;" to SplitRight. With these "decreases" clauses, the
+// clauses, and in this case it automatically adds "decreases 0" to SplitLeft
+// and "decreases 1" to SplitRight. With these "decreases" clauses, the
// termination check of SplitRight's call to SplitLeft will simply be "0 < 1",
// which is trivial to check.
function SplitLeft(s: Stream): Stream
@@ -284,7 +286,7 @@ function SplitRight(s: Stream): Stream
}
colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream)
- ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s);
+ ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s)
{
var LHS := merge(SplitLeft(s), SplitRight(s));
// The construct that follows is a "calc" statement. It gives a way to write an
@@ -318,7 +320,7 @@ colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream)
}
colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Equal(s: Stream)
- ensures merge(SplitLeft(s), SplitRight(s)) == s;
+ ensures merge(SplitLeft(s), SplitRight(s)) == s
{
// The proof of this co-lemma is actually done completely automatically (so the
// body of this co-lemma can be empty). However, just to show what the calculations
diff --git a/Test/dafny4/KozenSilva.dfy.expect b/Test/dafny4/KozenSilva.dfy.expect
index c6c90498..90432af3 100644
--- a/Test/dafny4/KozenSilva.dfy.expect
+++ b/Test/dafny4/KozenSilva.dfy.expect
@@ -1,2 +1,2 @@
-Dafny program verifier finished with 47 verified, 0 errors
+Dafny program verifier finished with 49 verified, 0 errors
diff --git a/Test/dafny4/NipkowKlein-chapter7.dfy b/Test/dafny4/NipkowKlein-chapter7.dfy
index 7db31cbd..e694fc4b 100644
--- a/Test/dafny4/NipkowKlein-chapter7.dfy
+++ b/Test/dafny4/NipkowKlein-chapter7.dfy
@@ -112,13 +112,6 @@ inductive lemma lemma_7_6(b: bexp, c: com, c': com, s: state, t: state)
requires big_step(While(b, c), s, t) && equiv_c(c, c')
ensures big_step(While(b, c'), s, t)
{
- if !bval(b, s) {
- // trivial
- } else {
- var s' :| big_step#[_k-1](c, s, s') && big_step#[_k-1](While(b, c), s', t);
- lemma_7_6(b, c, c', s', t); // induction hypothesis
- assert big_step(While(b, c'), s', t);
- }
}
// equiv_c is an equivalence relation
@@ -139,27 +132,7 @@ inductive lemma IMP_is_deterministic(c: com, s: state, t: state, t': state)
requires big_step(c, s, t) && big_step(c, s, t')
ensures t == t'
{
- match c
- case SKIP =>
- // trivial
- case Assign(x, a) =>
- // trivial
- case Seq(c0, c1) =>
- var s' :| big_step#[_k-1](c0, s, s') && big_step#[_k-1](c1, s', t);
- var s'' :| big_step#[_k-1](c0, s, s'') && big_step#[_k-1](c1, s'', t');
- IMP_is_deterministic(c0, s, s', s'');
- IMP_is_deterministic(c1, s', t, t');
- case If(b, thn, els) =>
- IMP_is_deterministic(if bval(b, s) then thn else els, s, t, t');
- case While(b, body) =>
- if !bval(b, s) {
- // trivial
- } else {
- var s' :| big_step#[_k-1](body, s, s') && big_step#[_k-1](While(b, body), s', t);
- var s'' :| big_step#[_k-1](body, s, s'') && big_step#[_k-1](While(b, body), s'', t');
- IMP_is_deterministic(body, s, s', s'');
- IMP_is_deterministic(While(b, body), s', t, t');
- }
+ // Dafny totally rocks!
}
// ----- Small-step semantics -----
@@ -214,12 +187,6 @@ inductive lemma star_transitive_aux(c0: com, s0: state, c1: com, s1: state, c2:
requires small_step_star(c0, s0, c1, s1)
ensures small_step_star(c1, s1, c2, s2) ==> small_step_star(c0, s0, c2, s2)
{
- if c0 == c1 && s0 == s1 {
- } else {
- var c', s' :|
- small_step(c0, s0, c', s') && small_step_star#[_k-1](c', s', c1, s1);
- star_transitive_aux(c', s', c1, s1, c2, s2);
- }
}
// The big-step semantics can be simulated by some number of small steps
@@ -240,15 +207,14 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
small_step_star(Seq(c0, c1), s, Seq(SKIP, c1), s') && small_step_star(Seq(SKIP, c1), s', SKIP, t);
{ lemma_7_13(c0, s, SKIP, s', c1); }
small_step_star(c0, s, SKIP, s') && small_step_star(Seq(SKIP, c1), s', SKIP, t);
- { BigStep_implies_SmallStepStar(c0, s, s'); }
+ //{ BigStep_implies_SmallStepStar(c0, s, s'); }
small_step_star(Seq(SKIP, c1), s', SKIP, t);
{ assert small_step(Seq(SKIP, c1), s', c1, s'); }
small_step_star(c1, s', SKIP, t);
- { BigStep_implies_SmallStepStar(c1, s', t); }
+ //{ BigStep_implies_SmallStepStar(c1, s', t); }
true;
}
case If(b, thn, els) =>
- BigStep_implies_SmallStepStar(if bval(b, s) then thn else els, s, t);
case While(b, body) =>
if !bval(b, s) && s == t {
calc <== {
@@ -271,11 +237,11 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
small_step_star(Seq(body, While(b, body)), s, Seq(SKIP, While(b, body)), s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
{ lemma_7_13(body, s, SKIP, s', While(b, body)); }
small_step_star(body, s, SKIP, s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
- { BigStep_implies_SmallStepStar(body, s, s'); }
+ //{ BigStep_implies_SmallStepStar(body, s, s'); }
small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
{ assert small_step(Seq(SKIP, While(b, body)), s', While(b, body), s'); }
small_step_star(While(b, body), s', SKIP, t);
- { BigStep_implies_SmallStepStar(While(b, body), s', t); }
+ //{ BigStep_implies_SmallStepStar(While(b, body), s', t); }
true;
}
}
@@ -299,7 +265,6 @@ inductive lemma SmallStepStar_implies_BigStep(c: com, s: state, t: state)
if c == SKIP && s == t {
} else {
var c', s' :| small_step(c, s, c', s') && small_step_star#[_k-1](c', s', SKIP, t);
- SmallStepStar_implies_BigStep(c', s', t);
SmallStep_plus_BigStep(c, s, c', s', t);
}
}
@@ -316,7 +281,6 @@ inductive lemma SmallStep_plus_BigStep(c: com, s: state, c': com, s': state, t:
var c0' :| c' == Seq(c0', c1) && small_step(c0, s, c0', s');
if big_step(c', s', t) {
var s'' :| big_step(c0', s', s'') && big_step(c1, s'', t);
- SmallStep_plus_BigStep(c0, s, c0', s', s'');
}
}
case If(b, thn, els) =>
diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy
index 3dba6325..0d6cffa1 100644
--- a/Test/dafny4/NumberRepresentations.dfy
+++ b/Test/dafny4/NumberRepresentations.dfy
@@ -234,17 +234,16 @@ lemma UniqueRepresentation(a: seq<int>, b: seq<int>, lowDigit: int, base: nat)
}
lemma ZeroIsUnique(a: seq<int>, lowDigit: int, base: nat)
- requires 2 <= base && lowDigit <= 0 < lowDigit + base;
- requires a == trim(a);
- requires IsSkewNumber(a, lowDigit, base);
- requires eval(a, base) == 0;
- ensures a == [];
+ requires 2 <= base && lowDigit <= 0 < lowDigit + base
+ requires a == trim(a)
+ requires IsSkewNumber(a, lowDigit, base)
+ requires eval(a, base) == 0
+ ensures a == []
{
if a != [] {
- assert eval(a, base) == a[0] + base * eval(a[1..], base);
if eval(a[1..], base) == 0 {
TrimProperty(a);
- ZeroIsUnique(a[1..], lowDigit, base);
+ // ZeroIsUnique(a[1..], lowDigit, base);
}
assert false;
}
diff --git a/Test/vstte2012/Tree.dfy b/Test/vstte2012/Tree.dfy
index a346aac5..662024e4 100644
--- a/Test/vstte2012/Tree.dfy
+++ b/Test/vstte2012/Tree.dfy
@@ -22,9 +22,9 @@ datatype Result = Fail | Res(t: Tree, sOut: seq<int>)
// The postconditions state properties that are needed
// in the completeness proof.
function toList(d: int, t: Tree): seq<int>
- ensures toList(d, t) != [] && toList(d, t)[0] >= d;
- ensures (toList(d, t)[0] == d) == (t == Leaf);
- decreases t;
+ ensures toList(d, t) != [] && toList(d, t)[0] >= d
+ ensures (toList(d, t)[0] == d) == (t == Leaf)
+ decreases t
{
match t
case Leaf => [d]
@@ -43,10 +43,10 @@ function toList(d: int, t: Tree): seq<int>
function method build_rec(d: int, s: seq<int>): Result
ensures build_rec(d, s).Res? ==>
|build_rec(d, s).sOut| < |s| &&
- build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..];
+ build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..]
ensures build_rec(d, s).Res? ==>
- toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|];
- decreases |s|, (if s==[] then 0 else s[0]-d);
+ toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|]
+ decreases |s|, (if s==[] then 0 else s[0]-d)
{
if s==[] || s[0] < d then
Fail
@@ -68,7 +68,7 @@ function method build_rec(d: int, s: seq<int>): Result
// sequence yields exactly the input sequence.
// Completeness is proved as a lemma, see below.
function method build(s: seq<int>): Result
- ensures build(s).Res? ==> toList(0,build(s).t) == s;
+ ensures build(s).Res? ==> toList(0,build(s).t) == s
{
var r := build_rec(0, s);
if r.Res? && r.sOut == [] then r else Fail
@@ -86,16 +86,14 @@ function method build(s: seq<int>): Result
// correctly (encoded by calls to this lemma).
lemma lemma0(t: Tree, d: int, s: seq<int>)
ensures build_rec(d, toList(d, t) + s).Res? &&
- build_rec(d, toList(d, t) + s).sOut == s;
+ build_rec(d, toList(d, t) + s).sOut == s
{
match(t) {
case Leaf =>
assert toList(d, t) == [d];
case Node(l, r) =>
assert toList(d, t) + s == toList(d+1, l) + (toList(d+1, r) + s);
-
- lemma0(l, d+1, toList(d+1, r) + s); // apply the induction hypothesis
- lemma0(r, d+1, s); // apply the induction hypothesis
+ // the rest follows from (two invocations of) the (automatically applied) induction hypothesis
}
}
@@ -107,8 +105,8 @@ lemma lemma0(t: Tree, d: int, s: seq<int>)
// the following two lemmas replace these arguments
// by quantified variables.
lemma lemma1(t: Tree, s:seq<int>)
- requires s == toList(0, t) + [];
- ensures build(s).Res?;
+ requires s == toList(0, t) + []
+ ensures build(s).Res?
{
lemma0(t, 0, []);
}
@@ -117,7 +115,7 @@ lemma lemma1(t: Tree, s:seq<int>)
// This lemma introduces the existential quantifier in the completeness
// property.
lemma lemma2(s: seq<int>)
- ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res?;
+ ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res?
{
forall t | toList(0,t) == s {
lemma1(t, s);
@@ -131,7 +129,7 @@ lemma lemma2(s: seq<int>)
// The body of the method converts the argument of lemma2
// into a universally quantified variable.
lemma completeness()
- ensures forall s: seq<int> :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?);
+ ensures forall s: seq<int> :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?)
{
forall s {
lemma2(s);
@@ -145,7 +143,7 @@ lemma completeness()
// unfold the necessary definitions.
method harness0()
ensures build([1,3,3,2]).Res? &&
- build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf));
+ build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf))
{
}
@@ -155,6 +153,6 @@ method harness0()
// assertions are required by the verifier to
// unfold the necessary definitions.
method harness1()
- ensures build([1,3,2,2]).Fail?;
+ ensures build([1,3,2,2]).Fail?
{
}
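
A closing note on the explicit attribute forms validated by the Rewriter.cs warnings above (a hypothetical sketch; the lemma and parameter names are illustrative):

lemma {:induction false} A(m: nat, n: nat)  // suppress inference for this lemma
lemma {:induction true}  B(m: nat, n: nat)  // explicitly request the heuristic
lemma {:induction n}     C(m: nat, n: nat)  // induct on a chosen subset of parameters...
lemma {:induction m, n}  D(m: nat, n: nat)  // ...listed in declaration order

Per the warning messages, listing parameters out of declaration order (e.g. {:induction n, m}) causes Dafny to ignore the attribute.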