path: root/Test/dafny4
Diffstat (limited to 'Test/dafny4')
-rw-r--r--  Test/dafny4/ACL2-extractor.dfy | 8
-rw-r--r--  Test/dafny4/Ackermann.dfy | 235
-rw-r--r--  Test/dafny4/Ackermann.dfy.expect | 6
-rw-r--r--  Test/dafny4/BinarySearch.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug100.dfy | 23
-rw-r--r--  Test/dafny4/Bug100.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug101.dfy | 19
-rw-r--r--  Test/dafny4/Bug101.dfy.expect | 8
-rw-r--r--  Test/dafny4/Bug103.dfy | 20
-rw-r--r--  Test/dafny4/Bug103.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug104.dfy | 12
-rw-r--r--  Test/dafny4/Bug104.dfy.expect | 3
-rw-r--r--  Test/dafny4/Bug107.dfy | 16
-rw-r--r--  Test/dafny4/Bug107.dfy.expect | 6
-rw-r--r--  Test/dafny4/Bug108.dfy | 11
-rw-r--r--  Test/dafny4/Bug108.dfy.expect | 7
-rw-r--r--  Test/dafny4/Bug110.dfy | 31
-rw-r--r--  Test/dafny4/Bug110.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug111.dfy | 18
-rw-r--r--  Test/dafny4/Bug111.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug113.dfy | 10
-rw-r--r--  Test/dafny4/Bug113.dfy.expect | 6
-rw-r--r--  Test/dafny4/Bug114.dfy | 10
-rw-r--r--  Test/dafny4/Bug114.dfy.expect | 7
-rw-r--r--  Test/dafny4/Bug116.dfy | 15
-rw-r--r--  Test/dafny4/Bug116.dfy.expect | 3
-rw-r--r--  Test/dafny4/Bug117.dfy | 37
-rw-r--r--  Test/dafny4/Bug117.dfy.expect | 3
-rw-r--r--  Test/dafny4/Bug118.dfy | 12
-rw-r--r--  Test/dafny4/Bug118.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug120.dfy | 11
-rw-r--r--  Test/dafny4/Bug120.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug121.dfy | 18
-rw-r--r--  Test/dafny4/Bug121.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug122.dfy | 17
-rw-r--r--  Test/dafny4/Bug122.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug124.dfy | 14
-rw-r--r--  Test/dafny4/Bug124.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug125.dfy | 58
-rw-r--r--  Test/dafny4/Bug125.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug128.dfy | 13
-rw-r--r--  Test/dafny4/Bug128.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug129.dfy | 12
-rw-r--r--  Test/dafny4/Bug129.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug131.dfy | 11
-rw-r--r--  Test/dafny4/Bug131.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug132.dfy | 45
-rw-r--r--  Test/dafny4/Bug132.dfy.expect | 18
-rw-r--r--  Test/dafny4/Bug133.dfy | 18
-rw-r--r--  Test/dafny4/Bug133.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug134.dfy | 23
-rw-r--r--  Test/dafny4/Bug134.dfy.expect | 4
-rw-r--r--  Test/dafny4/Bug136.dfy | 12
-rw-r--r--  Test/dafny4/Bug136.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug138.dfy | 22
-rw-r--r--  Test/dafny4/Bug138.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug139.dfy | 25
-rw-r--r--  Test/dafny4/Bug139.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug140.dfy | 67
-rw-r--r--  Test/dafny4/Bug140.dfy.expect | 6
-rw-r--r--  Test/dafny4/Bug148.dfy | 25
-rw-r--r--  Test/dafny4/Bug148.dfy.expect | 17
-rw-r--r--  Test/dafny4/Bug49.dfy | 74
-rw-r--r--  Test/dafny4/Bug49.dfy.expect | 8
-rw-r--r--  Test/dafny4/Bug60.dfy | 4
-rw-r--r--  Test/dafny4/Bug63.dfy | 4
-rw-r--r--  Test/dafny4/Bug73.dfy.expect | 4
-rw-r--r--  Test/dafny4/Bug75.dfy | 50
-rw-r--r--  Test/dafny4/Bug75.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug79.dfy | 10
-rw-r--r--  Test/dafny4/Bug79.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug81.dfy | 9
-rw-r--r--  Test/dafny4/Bug81.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug82.dfy | 11
-rw-r--r--  Test/dafny4/Bug82.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug88.dfy | 18
-rw-r--r--  Test/dafny4/Bug88.dfy.expect | 11
-rw-r--r--  Test/dafny4/Bug89.dfy | 15
-rw-r--r--  Test/dafny4/Bug89.dfy.expect | 6
-rw-r--r--  Test/dafny4/Bug91.dfy | 40
-rw-r--r--  Test/dafny4/Bug91.dfy.expect | 2
-rw-r--r--  Test/dafny4/Bug93.dfy | 37
-rw-r--r--  Test/dafny4/Bug93.dfy.expect | 8
-rw-r--r--  Test/dafny4/Bug94.dfy | 35
-rw-r--r--  Test/dafny4/Bug94.dfy.expect | 6
-rw-r--r--  Test/dafny4/Bug99.dfy | 11
-rw-r--r--  Test/dafny4/Bug99.dfy.expect | 2
-rw-r--r--  Test/dafny4/Circ.dfy | 2
-rw-r--r--  Test/dafny4/CoqArt-InsertionSort.dfy | 1
-rw-r--r--  Test/dafny4/FlyingRobots.dfy | 285
-rw-r--r--  Test/dafny4/FlyingRobots.dfy.expect | 5
-rw-r--r--  Test/dafny4/Fstar-QuickSort.dfy | 40
-rw-r--r--  Test/dafny4/GHC-MergeSort.dfy | 9
-rw-r--r--  Test/dafny4/KozenSilva.dfy | 80
-rw-r--r--  Test/dafny4/KozenSilva.dfy.expect | 2
-rw-r--r--  Test/dafny4/LargeConstants.dfy | 14
-rw-r--r--  Test/dafny4/LargeConstants.dfy.expect | 2
-rw-r--r--  Test/dafny4/Leq.dfy | 174
-rw-r--r--  Test/dafny4/Leq.dfy.expect | 3
-rw-r--r--  Test/dafny4/McCarthy91.dfy | 86
-rw-r--r--  Test/dafny4/McCarthy91.dfy.expect | 25
-rw-r--r--  Test/dafny4/MonadicLaws.dfy | 100
-rw-r--r--  Test/dafny4/MonadicLaws.dfy.expect | 2
-rw-r--r--  Test/dafny4/NipkowKlein-chapter3.dfy | 17
-rw-r--r--  Test/dafny4/NipkowKlein-chapter3.dfy.expect | 2
-rw-r--r--  Test/dafny4/NipkowKlein-chapter7.dfy | 164
-rw-r--r--  Test/dafny4/NipkowKlein-chapter7.dfy.expect | 2
-rw-r--r--  Test/dafny4/NumberRepresentations.dfy | 19
-rw-r--r--  Test/dafny4/Primes.dfy | 17
-rw-r--r--  Test/dafny4/Regression0.dfy | 13
-rw-r--r--  Test/dafny4/Regression0.dfy.expect | 3
-rw-r--r--  Test/dafny4/Regression1.dfy | 9
-rw-r--r--  Test/dafny4/Regression1.dfy.expect | 2
-rw-r--r--  Test/dafny4/SoftwareFoundations-Basics.dfy.expect | 2
-rw-r--r--  Test/dafny4/UnionFind.dfy | 332
-rw-r--r--  Test/dafny4/UnionFind.dfy.expect | 3
-rw-r--r--  Test/dafny4/set-compr.dfy | 54
-rw-r--r--  Test/dafny4/set-compr.dfy.expect | 17
118 files changed, 2609 insertions, 248 deletions
diff --git a/Test/dafny4/ACL2-extractor.dfy b/Test/dafny4/ACL2-extractor.dfy
index 8fe98531..bd2c9d83 100644
--- a/Test/dafny4/ACL2-extractor.dfy
+++ b/Test/dafny4/ACL2-extractor.dfy
@@ -116,9 +116,9 @@ lemma RevLength(xs: List)
// you can prove two lists equal by proving their elements equal
lemma EqualElementsMakeEqualLists(xs: List, ys: List)
- requires length(xs) == length(ys);
- requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys);
- ensures xs == ys;
+ requires length(xs) == length(ys)
+ requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys)
+ ensures xs == ys
{
match xs {
case Nil =>
@@ -132,7 +132,7 @@ lemma EqualElementsMakeEqualLists(xs: List, ys: List)
nth(i+1, xs) == nth(i+1, ys);
}
}
- EqualElementsMakeEqualLists(xs.tail, ys.tail);
+ // EqualElementsMakeEqualLists(xs.tail, ys.tail);
}
}
diff --git a/Test/dafny4/Ackermann.dfy b/Test/dafny4/Ackermann.dfy
new file mode 100644
index 00000000..7ab686e0
--- /dev/null
+++ b/Test/dafny4/Ackermann.dfy
@@ -0,0 +1,235 @@
+// RUN: %dafny /compile:3 /rprint:"%t.rprint" /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+// Rustan Leino, 8 Sep 2015.
+// This file proves that the Ackermann function is monotonic in each argument
+
+method Main() {
+ // Note, don't try much larger numbers unless you're prepared to wait!
+ print "Ack(3, 4) = ", Ack(3, 4), "\n";
+}
+
+// Here is the definition of the Ackermann function:
+function method Ack(m: nat, n: nat): nat
+{
+ if m == 0 then
+ n + 1
+ else if n == 0 then
+ Ack(m - 1, 1)
+ else
+ Ack(m - 1, Ack(m, n - 1))
+}
+
+// And here is the theorem that proves the desired result:
+lemma AckermannIsMonotonic(m: nat, n: nat, m': nat, n': nat)
+ requires m <= m' && n <= n'
+ ensures Ack(m, n) <= Ack(m', n')
+{
+ // This is the proof. It calls two lemmas, stated and proved below.
+ MonotonicN(m, n, n');
+ MonotonicM(m, n, m');
+}
+
+// --------------------------------------------------------
+// What follows are the supporting lemmas and their proofs.
+// --------------------------------------------------------
+
+// Proving that Ackermann is monotonic in its second argument is easy.
+lemma MonotonicN(m: nat, n: nat, n': nat)
+ requires n <= n'
+ ensures Ack(m, n) <= Ack(m, n')
+{
+ // In fact, Dafny completes this proof automatically.
+}
+
+// To prove the other direction, we consider an alternative formulation
+// of the Ackermann function, namely a curried form:
+function CurriedAckermann(m: int, n: int): int
+{
+ A(m)(n)
+}
+
+function A(m: int): int -> int
+{
+ if m <= 0 then
+ n => n + 1
+ else
+ n => Iter(A(m-1), n)
+}
+
+function Iter(f: int -> int, n: int): int
+ requires IsTotal(f)
+ decreases n
+{
+ if n <= 0 then
+ f(1)
+ else
+ f(Iter(f, n-1))
+}
+
+// In the 3-part definition above, function Iter needs to know that it can
+// apply its function parameter "f". Therefore, Iter's precondition says that
+// "f" must be total. We define totality of "f" by saying that its precondition
+// holds for any n and that it never reads the heap:
+predicate IsTotal(f: int -> int)
+ reads f.reads
+{
+ forall n :: f.requires(n) && f.reads(n) == {}
+}
+
+// Now, we can prove certain things about CurriedAckermann. The first thing we
+// prove is that it gives the same result as the Ack function above.
+
+lemma CurriedIsTheSame(m: nat, n: nat)
+ ensures CurriedAckermann(m, n) == Ack(m, n)
+{
+ // The proof considers 3 cases, following the 3 cases in the definition of Ack
+ if m == 0 {
+ // trivial
+ } else if n == 0 {
+ // trivial
+ } else {
+ // we help Dafny out with the following lemma:
+ assert A(m)(n) == A(m-1)(Iter(A(m-1), n-1));
+ }
+}
+
+// Monotonicity in the first argument of Ackermann now follows from the fact that,
+// for m <= m', A(m) is a function that is always below A(m')
+lemma MonotonicM(m: nat, n: nat, m': nat)
+ requires m <= m'
+ ensures Ack(m, n) <= Ack(m', n)
+{
+ CurriedIsTheSame(m, n);
+ CurriedIsTheSame(m', n);
+ ABelow(m, m');
+}
+
+// We must now prove ABelow. To do that, we will prove some properties of A and of
+// Iter. Let us define the three properties we will reason about.
+
+// The first property is a relation on functions. It is the property that was referred to
+// above as "f is a function that is always below g". The lemma ABelow(m, m') used above establishes
+// FunctionBelow(A(m), A(m')).
+predicate FunctionBelow(f: int -> int, g: int -> int)
+ requires IsTotal(f) && IsTotal(g)
+{
+ forall n :: f(n) <= g(n)
+}
+
+// The next property says that a function is monotonic.
+predicate FunctionMonotonic(f: int -> int)
+ requires IsTotal(f)
+{
+ forall n,n' :: n <= n' ==> f(n) <= f(n')
+}
+
+// The third property says that a function's return value is strictly greater than its argument.
+predicate Expanding(f: int -> int)
+ requires IsTotal(f)
+{
+ forall n :: n < f(n)
+}
+
+// We will prove that A(_) satisfies the properties FunctionBelow, FunctionMonotonic, and
+// Expanding. But first we prove three properties of Iter.
+
+// Provided that "f" is monotonic and expanding, Iter(f, _) is monotonic.
+lemma IterIsMonotonicN(f: int -> int, n: int, n': int)
+ requires IsTotal(f) && Expanding(f) && FunctionMonotonic(f) && n <= n'
+ ensures Iter(f, n) <= Iter(f, n')
+{
+ // This proof is a simple induction over n' and Dafny completes the proof automatically.
+}
+
+// Next, we prove that Iter(_, n) is monotonic. That is, given functions "f" and "g"
+// such that FunctionBelow(f, g), we have Iter(f, n) <= Iter(g, n). The lemma requires
+// "f" to be monotonic, but we don't have to state the same property for g.
+lemma IterIsMonotonicF(f: int -> int, g: int -> int, n: int)
+ requires IsTotal(f) && IsTotal(g) && FunctionMonotonic(f) && FunctionBelow(f, g)
+ ensures Iter(f, n) <= Iter(g, n)
+{
+ // This proof is a simple induction over n and Dafny completes the proof automatically.
+}
+
+// Finally, we show that for any expanding function "f", Iter(f, _) is also expanding.
+lemma IterExpanding(f: int -> int, n: int)
+ requires IsTotal(f) && Expanding(f)
+ ensures n < Iter(f, n)
+{
+  // Here, too, the proof is a simple induction over n and Dafny completes it automatically.
+}
+
+// We complete the proof by showing that A(_) has the three properties we need.
+
+// Of the three properties we prove about A(_), we start by showing that A(m) returns a
+// function that is expanding.
+lemma AExp(m: int)
+ ensures Expanding(A(m))
+{
+ if m <= 0 {
+ // trivial
+ } else {
+ // This branch of the proof follows from the fact that Iter(A(m-1), _) is expanding.
+ forall n {
+ // The following lemma requires A(m-1) to be expanding, which is something that
+      // follows from our induction hypothesis.
+ IterExpanding(A(m-1), n);
+ }
+ }
+}
+
+// Next, we show that A(m) returns a monotonic function.
+lemma Am(m: int)
+ ensures FunctionMonotonic(A(m))
+{
+ if m <= 0 {
+ // trivial
+ } else {
+ // We make use of the fact that A(m-1) is expanding:
+ AExp(m-1);
+ // and the fact that Iter(A(m-1), _) is monotonic:
+ forall n,n' | n <= n' {
+ // The following lemma requires A(m-1) to be expanding and monotonic.
+ // The former comes from the invocation of AExp(m-1) above, and the
+ // latter comes from our induction hypothesis.
+ IterIsMonotonicN(A(m-1), n, n');
+ }
+ }
+}
+
+// Our final property about A, which is the lemma we had used above to prove
+// that Ackermann is monotonic in its first argument, is stated and proved here:
+lemma ABelow(m: int, m': int)
+ requires m <= m'
+ ensures FunctionBelow(A(m), A(m'))
+{
+ if m' <= 0 {
+ // trivial
+ } else if m <= 0 {
+ forall n {
+ calc {
+ A(m)(n);
+ ==
+ n + 1;
+ <= { AExp(m'-1); IterExpanding(A(m'-1), n); }
+ Iter(A(m'-1), n);
+ ==
+ A(m')(n);
+ }
+ }
+ } else {
+ forall n {
+ calc {
+ A(m)(n);
+ ==
+ Iter(A(m-1), n);
+ <= { Am(m-1); ABelow(m-1, m'-1);
+ IterIsMonotonicF(A(m-1), A(m'-1), n); }
+ Iter(A(m'-1), n);
+ ==
+ A(m')(n);
+ }
+ }
+ }
+}
diff --git a/Test/dafny4/Ackermann.dfy.expect b/Test/dafny4/Ackermann.dfy.expect
new file mode 100644
index 00000000..158f2a48
--- /dev/null
+++ b/Test/dafny4/Ackermann.dfy.expect
@@ -0,0 +1,6 @@
+
+Dafny program verifier finished with 30 verified, 0 errors
+Program compiled successfully
+Running...
+
+Ack(3, 4) = 125
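Aside (editor's illustration, not part of this commit): the ABelow lemma in Ackermann.dfy above carries its proof with Dafny's calc statement, where each step may supply a hint block in braces. A minimal sketch of that proof style, with made-up names:

function Double(x: int): int { x + x }

lemma DoubleIsMonotonic(a: int, b: int)
  requires a <= b
  ensures Double(a) <= Double(b)
{
  calc {
    Double(a);
  ==  // by the definition of Double
    a + a;
  <=  { assert a <= b; }  // a hint block, like the ones in ABelow
    b + b;
  ==  // by the definition of Double
    Double(b);
  }
}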
diff --git a/Test/dafny4/BinarySearch.dfy.expect b/Test/dafny4/BinarySearch.dfy.expect
index 944f677a..a9f834b7 100644
--- a/Test/dafny4/BinarySearch.dfy.expect
+++ b/Test/dafny4/BinarySearch.dfy.expect
@@ -1,4 +1,4 @@
-BinarySearch.dfy(44,20): Error: result of operation might violate newtype constraint
+BinarySearch.dfy(44,19): Error: result of operation might violate newtype constraint
Execution trace:
(0,0): anon0
BinarySearch.dfy(40,3): anon18_LoopHead
diff --git a/Test/dafny4/Bug100.dfy b/Test/dafny4/Bug100.dfy
new file mode 100644
index 00000000..0c971e82
--- /dev/null
+++ b/Test/dafny4/Bug100.dfy
@@ -0,0 +1,23 @@
+// RUN: %dafny /compile:0 /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+lemma lemma_ensures(x:int, RefineInt:int->int)
+ requires forall y :: RefineInt.requires(y);
+ ensures forall y :: RefineInt(y) + x == RefineInt(x) + y;
+
+function Identity(z:int) : int
+
+lemma test()
+{
+ var v,w:int;
+ lemma_ensures(w, Identity);
+ //var RefineInt := Identity;
+ //assert RefineInt(v) == Identity(v);
+ assert Identity(v) + w == Identity(w) + v;
+}
+
+
+
+
+
+
diff --git a/Test/dafny4/Bug100.dfy.expect b/Test/dafny4/Bug100.dfy.expect
new file mode 100644
index 00000000..73ba063c
--- /dev/null
+++ b/Test/dafny4/Bug100.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 4 verified, 0 errors
diff --git a/Test/dafny4/Bug101.dfy b/Test/dafny4/Bug101.dfy
new file mode 100644
index 00000000..878ed57a
--- /dev/null
+++ b/Test/dafny4/Bug101.dfy
@@ -0,0 +1,19 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+predicate P(i:int) { true }
+
+lemma Tester()
+{
+// forall i ensures false ==> P(i) {}
+ forall i ensures P(i) <== false {}
+ assert forall i :: P(i) ==> false;
+ assert P(0);
+ assert false;
+}
+
+
+
+
+
+
diff --git a/Test/dafny4/Bug101.dfy.expect b/Test/dafny4/Bug101.dfy.expect
new file mode 100644
index 00000000..a4e5f4b3
--- /dev/null
+++ b/Test/dafny4/Bug101.dfy.expect
@@ -0,0 +1,8 @@
+Bug101.dfy(10,11): Error: assertion violation
+Execution trace:
+ (0,0): anon0
+ (0,0): anon6_Else
+ (0,0): anon7_Then
+ (0,0): anon5
+
+Dafny program verifier finished with 2 verified, 1 error
diff --git a/Test/dafny4/Bug103.dfy b/Test/dafny4/Bug103.dfy
new file mode 100644
index 00000000..559a361c
--- /dev/null
+++ b/Test/dafny4/Bug103.dfy
@@ -0,0 +1,20 @@
+// RUN: %dafny /compile:0 /autoTriggers:1 /print:"%t.print" "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+predicate IsLessThanSuccesor(i:int)
+{
+ i < i + 1
+}
+
+lemma LemmaWithoutTriggerOnForallStatement()
+{
+ forall i
+ ensures IsLessThanSuccesor(i);
+ {
+ }
+}
+
+
+
+
+
diff --git a/Test/dafny4/Bug103.dfy.expect b/Test/dafny4/Bug103.dfy.expect
new file mode 100644
index 00000000..52595bf9
--- /dev/null
+++ b/Test/dafny4/Bug103.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 3 verified, 0 errors
diff --git a/Test/dafny4/Bug104.dfy b/Test/dafny4/Bug104.dfy
new file mode 100644
index 00000000..ffabbb92
--- /dev/null
+++ b/Test/dafny4/Bug104.dfy
@@ -0,0 +1,12 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype PartRealPartGhost = PartRealPartGhost(x:int, ghost y:int)
+
+method UpdateField()
+{
+ var v := PartRealPartGhost(3, 4);
+ ghost var g := 5;
+ v := v[y := g];
+ v := v.(y := g);
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug104.dfy.expect b/Test/dafny4/Bug104.dfy.expect
new file mode 100644
index 00000000..1cc43cbf
--- /dev/null
+++ b/Test/dafny4/Bug104.dfy.expect
@@ -0,0 +1,3 @@
+Bug104.dfy(10,7): Warning: datatype update syntax D[f := E] is deprecated; the new syntax is D.(f := E)
+
+Dafny program verifier finished with 2 verified, 0 errors
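Aside (editor's illustration, not part of this commit): Bug104 exercises the two datatype-update syntaxes mentioned in the warning above. A minimal sketch with a made-up Point type:

datatype Point = Point(x: int, y: int)

method UpdateExample() {
  var p := Point(1, 2);
  // p := p[y := 5];   // deprecated bracket form, triggers the warning
  p := p.(y := 5);     // preferred form
  assert p == Point(1, 5);
}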
diff --git a/Test/dafny4/Bug107.dfy b/Test/dafny4/Bug107.dfy
new file mode 100644
index 00000000..56965d92
--- /dev/null
+++ b/Test/dafny4/Bug107.dfy
@@ -0,0 +1,16 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method Main()
+{
+ var f := Inc;
+ print(f(4));
+}
+
+function method Inc(x: int): int
+{
+ x + 2
+}
+
+
+
diff --git a/Test/dafny4/Bug107.dfy.expect b/Test/dafny4/Bug107.dfy.expect
new file mode 100644
index 00000000..eaa3aa54
--- /dev/null
+++ b/Test/dafny4/Bug107.dfy.expect
@@ -0,0 +1,6 @@
+
+Dafny program verifier finished with 3 verified, 0 errors
+Program compiled successfully
+Running...
+
+6
\ No newline at end of file
diff --git a/Test/dafny4/Bug108.dfy b/Test/dafny4/Bug108.dfy
new file mode 100644
index 00000000..17cb9f12
--- /dev/null
+++ b/Test/dafny4/Bug108.dfy
@@ -0,0 +1,11 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method Main() {
+ var A := map[0 := 1];
+ var B := map x | x in (set y | y in A) :: A[x];
+ print A, "\n";
+ print B, "\n";
+}
+
+
diff --git a/Test/dafny4/Bug108.dfy.expect b/Test/dafny4/Bug108.dfy.expect
new file mode 100644
index 00000000..94e65ba2
--- /dev/null
+++ b/Test/dafny4/Bug108.dfy.expect
@@ -0,0 +1,7 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
+Program compiled successfully
+Running...
+
+map[0 := 1]
+map[0 := 1]
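Aside (editor's illustration, not part of this commit): Bug108 builds a map whose domain is given by a set comprehension over another map's keys. A small self-contained sketch of those comprehension forms, with made-up values:

method ComprehensionExample() {
  var A := map[1 := 10, 2 := 20];
  var keys := set y | y in A;              // the domain of A, as a set
  var B := map x | x in keys :: A[x] + 1;  // a new map over the same keys
  assert B[1] == 11 && B[2] == 21;
}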
diff --git a/Test/dafny4/Bug110.dfy b/Test/dafny4/Bug110.dfy
new file mode 100644
index 00000000..aa808669
--- /dev/null
+++ b/Test/dafny4/Bug110.dfy
@@ -0,0 +1,31 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+module Io {
+ predicate AdvanceTime(oldTime:int) { oldTime > 2 }
+ class Time
+ {
+ static method GetTime()
+ ensures AdvanceTime(1);
+ }
+
+ function MaxPacketSize() : int { 65507 }
+
+ class UdpClient
+ {
+ method Receive()
+ ensures AdvanceTime(3);
+
+ method Send() returns(ok:bool)
+ requires 0 <= MaxPacketSize();
+ }
+}
+
+abstract module Host {
+ import opened Io // Doesn't work.
+ //import Io // Works
+}
+
+abstract module Main {
+ import H : Host
+}
diff --git a/Test/dafny4/Bug110.dfy.expect b/Test/dafny4/Bug110.dfy.expect
new file mode 100644
index 00000000..42fd56a5
--- /dev/null
+++ b/Test/dafny4/Bug110.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 8 verified, 0 errors
diff --git a/Test/dafny4/Bug111.dfy b/Test/dafny4/Bug111.dfy
new file mode 100644
index 00000000..730d44d3
--- /dev/null
+++ b/Test/dafny4/Bug111.dfy
@@ -0,0 +1,18 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype A = A(i:int)
+datatype B = B1(a1:A) | B2(a2:A)
+
+function f(b:B):int
+{
+ match b
+ {
+ case B1(A(i)) => i
+ case B2(A(j)) => j
+ }
+}
+
+
+
+
diff --git a/Test/dafny4/Bug111.dfy.expect b/Test/dafny4/Bug111.dfy.expect
new file mode 100644
index 00000000..c0c48e2b
--- /dev/null
+++ b/Test/dafny4/Bug111.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 1 verified, 0 errors
diff --git a/Test/dafny4/Bug113.dfy b/Test/dafny4/Bug113.dfy
new file mode 100644
index 00000000..8f5ddf9f
--- /dev/null
+++ b/Test/dafny4/Bug113.dfy
@@ -0,0 +1,10 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype D = D(q:int, r:int, s:int, t:int)
+
+method Main()
+{
+ print D(10, 20, 30, 40);
+ print "\n";
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug113.dfy.expect b/Test/dafny4/Bug113.dfy.expect
new file mode 100644
index 00000000..c4be010e
--- /dev/null
+++ b/Test/dafny4/Bug113.dfy.expect
@@ -0,0 +1,6 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
+Program compiled successfully
+Running...
+
+D.D(10, 20, 30, 40)
diff --git a/Test/dafny4/Bug114.dfy b/Test/dafny4/Bug114.dfy
new file mode 100644
index 00000000..1c0f0109
--- /dev/null
+++ b/Test/dafny4/Bug114.dfy
@@ -0,0 +1,10 @@
+// RUN: %dafny /compile:0 /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function f1(d:int):map<int,int>
+function f2(y:int, d:int):int
+
+method M(m:map<int,int>, d:int, x2:int)
+{
+ assert forall d :: f1(d) == (map x | x in m :: f2(x, d));
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug114.dfy.expect b/Test/dafny4/Bug114.dfy.expect
new file mode 100644
index 00000000..8e671f90
--- /dev/null
+++ b/Test/dafny4/Bug114.dfy.expect
@@ -0,0 +1,7 @@
+Bug114.dfy(9,9): Error: assertion violation
+Execution trace:
+ (0,0): anon0
+ (0,0): anon3_Then
+ (0,0): anon2
+
+Dafny program verifier finished with 3 verified, 1 error
diff --git a/Test/dafny4/Bug116.dfy b/Test/dafny4/Bug116.dfy
new file mode 100644
index 00000000..9fd30597
--- /dev/null
+++ b/Test/dafny4/Bug116.dfy
@@ -0,0 +1,15 @@
+// RUN: %dafny "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype struct = S // this is ok
+
+method Main()
+{
+ var s := S; // this line generates illegal C# code
+ print s;
+}
+
+
+
+
+
diff --git a/Test/dafny4/Bug116.dfy.expect b/Test/dafny4/Bug116.dfy.expect
new file mode 100644
index 00000000..b0cf7300
--- /dev/null
+++ b/Test/dafny4/Bug116.dfy.expect
@@ -0,0 +1,3 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
+Compiled assembly into Bug116.exe
diff --git a/Test/dafny4/Bug117.dfy b/Test/dafny4/Bug117.dfy
new file mode 100644
index 00000000..418746cb
--- /dev/null
+++ b/Test/dafny4/Bug117.dfy
@@ -0,0 +1,37 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+abstract module AbstractModule1
+{
+ type AbstractType1
+}
+
+abstract module AbstractModule2
+{
+ import opened AM1 : AbstractModule1
+
+ datatype AbstractType2 = AbstractType2(x:AbstractType1)
+}
+
+abstract module AbstractModule3
+{
+ import AM1 : AbstractModule1
+
+ datatype AbstractType2 = AbstractType2(x:AM1.AbstractType1)
+}
+
+module ConcreteModule1
+{
+ type AbstractType1 = int
+}
+
+module ConcreteModule2 refines AbstractModule2
+{
+  import AM1 = ConcreteModule1 // error: must be declared "opened" to match AbstractModule2
+}
+
+module ConcreteModule3 refines AbstractModule3
+{
+ import opened AM1 = ConcreteModule1 // error: can't be declared "opened" to match AbstractModule3
+}
+
diff --git a/Test/dafny4/Bug117.dfy.expect b/Test/dafny4/Bug117.dfy.expect
new file mode 100644
index 00000000..0c5a5445
--- /dev/null
+++ b/Test/dafny4/Bug117.dfy.expect
@@ -0,0 +1,3 @@
+Bug117.dfy(28,7): Error: AM1 in ConcreteModule2 must be imported with "opened" to match the corresponding import in its refinement base AbstractModule2.
+Bug117.dfy(33,7): Error: AM1 in ConcreteModule3 cannot be imported with "opened" because it does not match the corresponding import in the refinement base AbstractModule3
+2 resolution/type errors detected in Bug117.dfy
diff --git a/Test/dafny4/Bug118.dfy b/Test/dafny4/Bug118.dfy
new file mode 100644
index 00000000..1e2dddeb
--- /dev/null
+++ b/Test/dafny4/Bug118.dfy
@@ -0,0 +1,12 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+class Foo {
+ ghost var Repr: set<object>
+}
+
+function SeqRepr(s:seq<Foo>) : set<object>
+ reads set b | b in s
+{
+ set o,b | b in s && b != null && o in b.Repr :: o // Works if you say "set b,o | ..."
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug118.dfy.expect b/Test/dafny4/Bug118.dfy.expect
new file mode 100644
index 00000000..c0c48e2b
--- /dev/null
+++ b/Test/dafny4/Bug118.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 1 verified, 0 errors
diff --git a/Test/dafny4/Bug120.dfy b/Test/dafny4/Bug120.dfy
new file mode 100644
index 00000000..73553f2a
--- /dev/null
+++ b/Test/dafny4/Bug120.dfy
@@ -0,0 +1,11 @@
+// RUN: %dafny /compile:0 /noNLarith "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function G(n: nat): nat
+{
+ if n == 0 then 5 else G(n-1)
+}
+
+method Test() {
+ assert G(10) == 5;
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug120.dfy.expect b/Test/dafny4/Bug120.dfy.expect
new file mode 100644
index 00000000..52595bf9
--- /dev/null
+++ b/Test/dafny4/Bug120.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 3 verified, 0 errors
diff --git a/Test/dafny4/Bug121.dfy b/Test/dafny4/Bug121.dfy
new file mode 100644
index 00000000..13798fa8
--- /dev/null
+++ b/Test/dafny4/Bug121.dfy
@@ -0,0 +1,18 @@
+// RUN: %dafny /compile:0 /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method Try (a:int, b:int, c:int)
+{
+ forall
+ ensures a * c == a * c;
+ ensures b * c == b * c;
+ {
+ }
+}
+
+
+
+
+
+
+
diff --git a/Test/dafny4/Bug121.dfy.expect b/Test/dafny4/Bug121.dfy.expect
new file mode 100644
index 00000000..069e7767
--- /dev/null
+++ b/Test/dafny4/Bug121.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
diff --git a/Test/dafny4/Bug122.dfy b/Test/dafny4/Bug122.dfy
new file mode 100644
index 00000000..adbee30d
--- /dev/null
+++ b/Test/dafny4/Bug122.dfy
@@ -0,0 +1,17 @@
+// RUN: %dafny /compile:0 /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method Try (a:int)
+{
+ forall
+ ensures a == a;
+ {
+ }
+}
+
+
+
+
+
+
+
diff --git a/Test/dafny4/Bug122.dfy.expect b/Test/dafny4/Bug122.dfy.expect
new file mode 100644
index 00000000..069e7767
--- /dev/null
+++ b/Test/dafny4/Bug122.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
diff --git a/Test/dafny4/Bug124.dfy b/Test/dafny4/Bug124.dfy
new file mode 100644
index 00000000..60f26a00
--- /dev/null
+++ b/Test/dafny4/Bug124.dfy
@@ -0,0 +1,14 @@
+// RUN: %dafny /compile:0 /autoTriggers:1 /noNLarith "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function power(n:nat, e:nat) : int
+
+lemma lemma_power()
+ ensures forall n:nat, e:nat :: 0 <= n * e && power(n, e) == 5;
+{
+ forall n:nat, e:nat
+ ensures 0 <= n * e && power(n, e) == 5;
+ {
+ assume false;
+ }
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug124.dfy.expect b/Test/dafny4/Bug124.dfy.expect
new file mode 100644
index 00000000..52595bf9
--- /dev/null
+++ b/Test/dafny4/Bug124.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 3 verified, 0 errors
diff --git a/Test/dafny4/Bug125.dfy b/Test/dafny4/Bug125.dfy
new file mode 100644
index 00000000..5dbdea6f
--- /dev/null
+++ b/Test/dafny4/Bug125.dfy
@@ -0,0 +1,58 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+abstract module AbstractModuleA
+{
+ type T
+}
+
+abstract module AbstractModuleB
+{
+ import opened AMA : AbstractModuleA
+
+ method Foo(t:T)
+}
+
+abstract module AbstractModuleC refines AbstractModuleB
+{
+ import opened AMA2 : AbstractModuleA
+}
+
+module LibA {
+ class G {
+ static function f(x:int) : bool {
+ x >= 10
+ }
+ }
+
+ function g() : bool {
+ true
+ }
+}
+
+module LibB {
+ class G {
+ static function f(x:int) : bool {
+ x < 10
+ }
+ }
+
+ function g() : bool {
+ false
+ }
+}
+
+module R {
+ import opened LibA
+}
+
+module S refines R {
+ import opened LibB
+ method m() {
+ assert g(); // should be LibA.g
+ }
+
+ method m1() {
+ assert G.f(20); // should be LibA.G.f
+ }
+}
diff --git a/Test/dafny4/Bug125.dfy.expect b/Test/dafny4/Bug125.dfy.expect
new file mode 100644
index 00000000..c87e2af2
--- /dev/null
+++ b/Test/dafny4/Bug125.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 10 verified, 0 errors
diff --git a/Test/dafny4/Bug128.dfy b/Test/dafny4/Bug128.dfy
new file mode 100644
index 00000000..b7220335
--- /dev/null
+++ b/Test/dafny4/Bug128.dfy
@@ -0,0 +1,13 @@
+// RUN: %dafny /noNLarith /z3opt:pi.warnings=true /proverWarnings:1 /compile:0 /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function GetIndexInSequence<T>(s:seq<T>, x:T) : int
+ requires x in s;
+ ensures 0 <= GetIndexInSequence(s, x) < |s|;
+ ensures s[GetIndexInSequence(s, x)] == x; {
+ var i :| 0 <= i < |s| && s[i] == x;
+ i
+ }
+
+
+
diff --git a/Test/dafny4/Bug128.dfy.expect b/Test/dafny4/Bug128.dfy.expect
new file mode 100644
index 00000000..c0c48e2b
--- /dev/null
+++ b/Test/dafny4/Bug128.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 1 verified, 0 errors
diff --git a/Test/dafny4/Bug129.dfy b/Test/dafny4/Bug129.dfy
new file mode 100644
index 00000000..f6cf0fe8
--- /dev/null
+++ b/Test/dafny4/Bug129.dfy
@@ -0,0 +1,12 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype Maybe<T> = Some(v:T) | None
+datatype B = B(b:Maybe<B>)
+
+datatype List<T> = Nil | Cons(T, List<T>)
+datatype Tree<T> = Nodes(children: List<Tree<T>>)
+
+
+
+
diff --git a/Test/dafny4/Bug129.dfy.expect b/Test/dafny4/Bug129.dfy.expect
new file mode 100644
index 00000000..a1c1f7b9
--- /dev/null
+++ b/Test/dafny4/Bug129.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 0 verified, 0 errors
diff --git a/Test/dafny4/Bug131.dfy b/Test/dafny4/Bug131.dfy
new file mode 100644
index 00000000..5d01cf59
--- /dev/null
+++ b/Test/dafny4/Bug131.dfy
@@ -0,0 +1,11 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+class Cell {
+ method Test(c: Cell) {
+ assert c.F();
+ }
+}
+
+predicate F()
+
diff --git a/Test/dafny4/Bug131.dfy.expect b/Test/dafny4/Bug131.dfy.expect
new file mode 100644
index 00000000..b4c98652
--- /dev/null
+++ b/Test/dafny4/Bug131.dfy.expect
@@ -0,0 +1,2 @@
+Bug131.dfy(6,13): Error: member F does not exist in class Cell
+1 resolution/type errors detected in Bug131.dfy
diff --git a/Test/dafny4/Bug132.dfy b/Test/dafny4/Bug132.dfy
new file mode 100644
index 00000000..b2a26876
--- /dev/null
+++ b/Test/dafny4/Bug132.dfy
@@ -0,0 +1,45 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+class Cell {
+ var data: int
+
+ predicate P()
+ reads this
+ { data < 0 }
+
+ predicate Q(e: Cell)
+ reads this, e
+ { e != null ==> e.data == data }
+
+ method Test() {
+ ghost var b;
+ var c := new Cell;
+ if {
+ // In the current state, everything is allowed:
+ case true => b := this.P();
+ case true => b := c.P();
+ case true => b := this.Q(this);
+ case true => b := this.Q(c);
+ case true => b := c.Q(this);
+ case true => b := c.Q(c);
+
+ // 'this' was allocated already in the 'old' state, so all of these are fine:
+ case true => b := old(this.P());
+ case true => b := old(P()); // same as previous line, of course
+ case true => b := old(this.Q(this));
+
+      // 'c' is freshly allocated in this method, so it cannot be used inside 'old'
+ case true => b := old(c.P()); // error: receiver argument must be allocated in the state in which the function is invoked
+ case true => b := old(c.Q(this)); // error: receiver argument must be allocated in the state in which the function is invoked
+
+ // The same rule should apply if 'c' is a non-receiver argument
+ case true => b := old(this.Q(c)); // BOGUS: this should also generate an error
+
+ // In the following, 'c' is used as both of the two arguments. It's not allowed as either argument. However, since the error
+ // about the receiver masks the error about the other parameter, only one error (about the receiver) should be reported.
+ case true => b := old(c.Q(c)); // BOGUS: this should generate an error about the receiver
+ }
+ }
+}
+
diff --git a/Test/dafny4/Bug132.dfy.expect b/Test/dafny4/Bug132.dfy.expect
new file mode 100644
index 00000000..0452f42d
--- /dev/null
+++ b/Test/dafny4/Bug132.dfy.expect
@@ -0,0 +1,18 @@
+Bug132.dfy(33,29): Error: receiver argument must be allocated in the state in which the function is invoked
+Execution trace:
+ (0,0): anon0
+ (0,0): anon24_Then
+Bug132.dfy(34,29): Error: receiver argument must be allocated in the state in which the function is invoked
+Execution trace:
+ (0,0): anon0
+ (0,0): anon25_Then
+Bug132.dfy(37,36): Error: argument must be allocated in the state in which the function is invoked
+Execution trace:
+ (0,0): anon0
+ (0,0): anon26_Then
+Bug132.dfy(41,29): Error: receiver argument must be allocated in the state in which the function is invoked
+Execution trace:
+ (0,0): anon0
+ (0,0): anon27_Then
+
+Dafny program verifier finished with 3 verified, 4 errors
diff --git a/Test/dafny4/Bug133.dfy b/Test/dafny4/Bug133.dfy
new file mode 100644
index 00000000..f7da133e
--- /dev/null
+++ b/Test/dafny4/Bug133.dfy
@@ -0,0 +1,18 @@
+// RUN: %dafny /noNLarith /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+module Math__div_def_i {
+ function my_div_pos(x:int, d:int) : int
+ requires d > 0;
+ decreases if x < 0 then (d - x) else x;
+ {
+ if x < 0 then
+ -1 + my_div_pos(x+d, d)
+ else if x < d then
+ 0
+ else
+ 1 + my_div_pos(x-d, d)
+ }
+}
+
+
diff --git a/Test/dafny4/Bug133.dfy.expect b/Test/dafny4/Bug133.dfy.expect
new file mode 100644
index 00000000..c0c48e2b
--- /dev/null
+++ b/Test/dafny4/Bug133.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 1 verified, 0 errors
diff --git a/Test/dafny4/Bug134.dfy b/Test/dafny4/Bug134.dfy
new file mode 100644
index 00000000..fff6c36a
--- /dev/null
+++ b/Test/dafny4/Bug134.dfy
@@ -0,0 +1,23 @@
+// RUN: %dafny /compile:0 /ironDafny "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+module NativeTypes {
+ newtype{:nativeType "ushort"} uint16 = i:int | 0 <= i < 0x10000
+}
+
+abstract module AbstractModuleA
+{
+ import opened NativeTypes
+ datatype T = T(i:uint16)
+}
+
+abstract module AbstractModuleB
+{
+ import opened A as AbstractModuleA
+}
+
+abstract module AbstractModuleC
+{
+ import opened B as AbstractModuleB
+}
+
diff --git a/Test/dafny4/Bug134.dfy.expect b/Test/dafny4/Bug134.dfy.expect
new file mode 100644
index 00000000..d5d7ae53
--- /dev/null
+++ b/Test/dafny4/Bug134.dfy.expect
@@ -0,0 +1,4 @@
+Bug134.dfy(16,20): Warning: "import A as B" has been deprecated; in the new syntax, it is "import A:B"
+Bug134.dfy(21,20): Warning: "import A as B" has been deprecated; in the new syntax, it is "import A:B"
+
+Dafny program verifier finished with 4 verified, 0 errors
diff --git a/Test/dafny4/Bug136.dfy b/Test/dafny4/Bug136.dfy
new file mode 100644
index 00000000..97c9e389
--- /dev/null
+++ b/Test/dafny4/Bug136.dfy
@@ -0,0 +1,12 @@
+// RUN: %dafny /compile:0 /print:"%t.print" "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method test()
+{
+ assume false;
+ assert true;
+}
+
+
+
+
diff --git a/Test/dafny4/Bug136.dfy.expect b/Test/dafny4/Bug136.dfy.expect
new file mode 100644
index 00000000..069e7767
--- /dev/null
+++ b/Test/dafny4/Bug136.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
diff --git a/Test/dafny4/Bug138.dfy b/Test/dafny4/Bug138.dfy
new file mode 100644
index 00000000..db0e54ef
--- /dev/null
+++ b/Test/dafny4/Bug138.dfy
@@ -0,0 +1,22 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype List = Nil | Cons(int, List)
+
+method R(xs: List)
+{
+ match xs
+ case Nil() => // currently produces a parsing error, but shouldn't
+ case Cons(x, Nil()) => // currently allowed
+ case Cons(x, Cons(y, tail)) =>
+}
+
+function F(xs: List) : int
+{
+ match xs
+ case Nil() => 0 // currently produces a parsing error, but shouldn't
+ case Cons(x, Nil()) => 1 // currently allowed
+ case Cons(x, Cons(y, tail)) => 2
+}
+
+
diff --git a/Test/dafny4/Bug138.dfy.expect b/Test/dafny4/Bug138.dfy.expect
new file mode 100644
index 00000000..52595bf9
--- /dev/null
+++ b/Test/dafny4/Bug138.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 3 verified, 0 errors
diff --git a/Test/dafny4/Bug139.dfy b/Test/dafny4/Bug139.dfy
new file mode 100644
index 00000000..bd4ded73
--- /dev/null
+++ b/Test/dafny4/Bug139.dfy
@@ -0,0 +1,25 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+datatype List = Nil | Cons(int, List)
+
+method R(xs: List)
+{
+ var a: int;
+ var b: int;
+ match xs
+ case Nil =>
+ case Cons(a, Nil()) => // this 'a' is allowed
+ case Cons(x, Cons(b, tail)) => // this 'b' (which is in a nested position) generates an error
+}
+
+function F(xs: List): int
+{
+ var a := 4;
+ var b := 7;
+ match xs
+ case Nil => 0
+ case Cons(a, Nil()) => 1
+ case Cons(x, Cons(b, tail)) => 2
+}
+
diff --git a/Test/dafny4/Bug139.dfy.expect b/Test/dafny4/Bug139.dfy.expect
new file mode 100644
index 00000000..52595bf9
--- /dev/null
+++ b/Test/dafny4/Bug139.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 3 verified, 0 errors
diff --git a/Test/dafny4/Bug140.dfy b/Test/dafny4/Bug140.dfy
new file mode 100644
index 00000000..9a85e36c
--- /dev/null
+++ b/Test/dafny4/Bug140.dfy
@@ -0,0 +1,67 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+class Node<T> {
+ ghost var List: seq<T>;
+ ghost var Repr: set<Node<T>>;
+
+ var data: T;
+ var next: Node<T>;
+
+ predicate Valid()
+ reads this, Repr
+ {
+ this in Repr && null !in Repr &&
+ (next == null ==> List == [data]) &&
+ (next != null ==>
+ next in Repr && next.Repr <= Repr &&
+ this !in next.Repr &&
+ List == [data] + next.List &&
+ next.Valid())
+ }
+
+ constructor (d: T)
+ modifies this
+ ensures Valid() && fresh(Repr - {this})
+ ensures List == [d]
+ {
+ data, next := d, null;
+ List, Repr := [d], {this};
+ }
+
+ constructor InitAsPredecessor(d: T, succ: Node<T>)
+ requires succ != null && succ.Valid() && this !in succ.Repr;
+ modifies this;
+ ensures Valid() && fresh(Repr - {this} - succ.Repr);
+ ensures List == [d] + succ.List;
+ {
+ data, next := d, succ;
+ List := [d] + succ.List;
+ Repr := {this} + succ.Repr;
+ }
+
+ method Prepend(d: T) returns (r: Node<T>)
+ requires Valid()
+ ensures r != null && r.Valid() && fresh(r.Repr - old(Repr))
+ ensures r.List == [d] + List
+ {
+ r := new Node.InitAsPredecessor(d, this);
+ }
+
+ method Print()
+ requires Valid()
+ decreases |List|
+ {
+ print data;
+ if (next != null) {
+ next.Print();
+ }
+ }
+}
+
+method Main()
+{
+ var l2 := new Node(2);
+ var l1 := l2.Prepend(1);
+ l1.Print();
+}
diff --git a/Test/dafny4/Bug140.dfy.expect b/Test/dafny4/Bug140.dfy.expect
new file mode 100644
index 00000000..00c7d129
--- /dev/null
+++ b/Test/dafny4/Bug140.dfy.expect
@@ -0,0 +1,6 @@
+
+Dafny program verifier finished with 11 verified, 0 errors
+Program compiled successfully
+Running...
+
+12
\ No newline at end of file
diff --git a/Test/dafny4/Bug148.dfy b/Test/dafny4/Bug148.dfy
new file mode 100644
index 00000000..b7a08952
--- /dev/null
+++ b/Test/dafny4/Bug148.dfy
@@ -0,0 +1,25 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method Main()
+{
+ var zero : real := 0.0;
+ var three : real := 3.0;
+ var fifteen : real := 15.0;
+ var negone : real := -1.0;
+ var negthree : real := -3.0;
+
+ print zero <= fifteen, "\n"; // true
+ print fifteen <= zero, "\n"; // false
+ print negone <= zero, "\n"; // true
+ print zero <= negone, "\n"; // false
+ print negone <= fifteen, "\n"; // true
+ print fifteen <= negone, "\n"; // false
+
+ print zero >= fifteen, "\n"; // false
+ print fifteen >= zero, "\n"; // true
+ print negone >= zero, "\n"; // false
+ print zero >= negone, "\n"; // true
+ print negone >= fifteen, "\n"; // false
+ print fifteen >= negone, "\n"; // true
+}
diff --git a/Test/dafny4/Bug148.dfy.expect b/Test/dafny4/Bug148.dfy.expect
new file mode 100644
index 00000000..7acfb169
--- /dev/null
+++ b/Test/dafny4/Bug148.dfy.expect
@@ -0,0 +1,17 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
+Program compiled successfully
+Running...
+
+True
+False
+True
+False
+True
+False
+False
+True
+False
+True
+False
+True
diff --git a/Test/dafny4/Bug49.dfy b/Test/dafny4/Bug49.dfy
new file mode 100644
index 00000000..887938d6
--- /dev/null
+++ b/Test/dafny4/Bug49.dfy
@@ -0,0 +1,74 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method Main()
+{
+ print apply(i => i + 1, 5), "\n";
+ print mapply(map[5 := 6], 5), "\n";
+ var f;
+ print five(f), "\n";
+}
+
+// -----
+// test that the definition axiom for function "apply" is available
+
+function method apply(f:int->int, a:int): int
+ reads f.reads
+ requires f.requires(a)
+{
+ f(a)
+}
+
+lemma TestPost()
+ ensures apply(i => i + 1, 5) == 6
+{
+}
+
+lemma M() {
+ assert apply(i => i + 1, 5) == 6;
+}
+
+lemma TestPre()
+ requires apply(i => i + 1, 5) == 6
+{
+}
+
+lemma TestPreCaller()
+{
+ TestPre();
+}
+
+// -----
+// test that the above thing for arrows also works for maps
+
+function method mapply(m: map<int,int>, a:int): int
+ requires a in m
+{
+ m[a]
+}
+
+lemma TestMPost()
+ ensures mapply(map[5 := 6], 5) == 6
+{
+}
+
+lemma N() {
+ assert mapply(map[5 := 6], 5) == 6;
+}
+
+// -----
+// test that g's result is known to be $Is'ed and $IsAlloc'ed
+
+function method five(f:int->int): int { 5 }
+
+lemma P() {
+ var f := i => i + 1;
+ assert five(f) == 5;
+}
+
+lemma Q(g: real->int->int)
+ requires g.requires(0.0)
+{
+ var f := g(0.0);
+ assert five(f) == 5;
+}
diff --git a/Test/dafny4/Bug49.dfy.expect b/Test/dafny4/Bug49.dfy.expect
new file mode 100644
index 00000000..653dda07
--- /dev/null
+++ b/Test/dafny4/Bug49.dfy.expect
@@ -0,0 +1,8 @@
+
+Dafny program verifier finished with 21 verified, 0 errors
+Program compiled successfully
+Running...
+
+6
+6
+5
diff --git a/Test/dafny4/Bug60.dfy b/Test/dafny4/Bug60.dfy
index 5340ad6b..c433451c 100644
--- a/Test/dafny4/Bug60.dfy
+++ b/Test/dafny4/Bug60.dfy
@@ -9,5 +9,5 @@ method Main()
print (s, m), "\n";
print (|s|, |m|), "\n";
print(set s | s in m), "\n";
- print (forall x :: x in (map [1:=10, 2:=20]) ==> x > 0), "\n";
-}
\ No newline at end of file
+ print (forall x {:nowarn} :: x in (map [1:=10, 2:=20]) ==> x > 0), "\n";
+}
diff --git a/Test/dafny4/Bug63.dfy b/Test/dafny4/Bug63.dfy
index 86aad232..39cbae1b 100644
--- a/Test/dafny4/Bug63.dfy
+++ b/Test/dafny4/Bug63.dfy
@@ -8,6 +8,6 @@ method M()
method Client()
{
- assume forall o: object :: o != null ==> false;
+ assume forall o: object {:nowarn} :: o != null ==> false;
M();
-}
\ No newline at end of file
+}
diff --git a/Test/dafny4/Bug73.dfy.expect b/Test/dafny4/Bug73.dfy.expect
index 6cf5c156..8beaa18c 100644
--- a/Test/dafny4/Bug73.dfy.expect
+++ b/Test/dafny4/Bug73.dfy.expect
@@ -1,9 +1,9 @@
-Bug73.dfy(7,14): Error: assertion violation
+Bug73.dfy(7,13): Error: assertion violation
Execution trace:
(0,0): anon0
Bug73.dfy(7,19): anon3_Else
(0,0): anon2
-Bug73.dfy(13,14): Error: assertion violation
+Bug73.dfy(13,13): Error: assertion violation
Execution trace:
(0,0): anon0
Bug73.dfy(13,20): anon3_Else
diff --git a/Test/dafny4/Bug75.dfy b/Test/dafny4/Bug75.dfy
new file mode 100644
index 00000000..37d35f77
--- /dev/null
+++ b/Test/dafny4/Bug75.dfy
@@ -0,0 +1,50 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+predicate R1(x:int, y:int) { x > 0 ==> R2(x - 1) }
+predicate R2(x:int) { exists y :: R1(x, y) }
+
+lemma L1(x:int)
+{
+ assume R2(x);
+ assert exists y :: R1(x, y); // FAILS
+}
+
+lemma L2(x:int)
+ requires R2(x); // Oddly, adding this requires fixes the problem
+{
+ assume R2(x);
+ assert exists y :: R1(x, y); // SUCCEEDS
+}
+
+// this predicate says that the first "n" elements of "s"
+
+// are in strictly increasing order
+
+predicate method Increasing(s: seq<int>, n: nat)
+ requires n <= |s|
+{
+ n < 2 ||
+ (s[n-2] < s[n-1] && Increasing(s, n-1))
+}
+
+method Extend(s: seq<int>, n: nat) returns (n': nat)
+ requires n < |s|
+ requires forall i :: 0 <= i < n ==> Increasing(s, i)
+ ensures n <= n' <= |s|
+ ensures forall j :: 0 <= j < n' ==> Increasing(s, j)
+{
+ if 2 <= n && s[n-2] < s[n-1] {
+ n' := n + 1;
+ } else {
+ n' := n;
+ }
+}
+
+function pred(i:int):int { i - 1 }
+predicate f(a:int, s:int) { (a <= 0 || (exists s0 :: f(pred(a), s0))) }
+
+lemma Fuel1(a:int, s:int)
+{
+ assert f(a, s) <==> (a <= 0 || (exists s0 :: f(pred(a), s0))); // FAILS
+}
diff --git a/Test/dafny4/Bug75.dfy.expect b/Test/dafny4/Bug75.dfy.expect
new file mode 100644
index 00000000..aeb37948
--- /dev/null
+++ b/Test/dafny4/Bug75.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 13 verified, 0 errors
diff --git a/Test/dafny4/Bug79.dfy b/Test/dafny4/Bug79.dfy
new file mode 100644
index 00000000..49f2421b
--- /dev/null
+++ b/Test/dafny4/Bug79.dfy
@@ -0,0 +1,10 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function foo(s:int) : (int, int)
+
+function bar(s:int) : bool
+{
+ var (x, rest) := foo(s);
+ x > 0
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug79.dfy.expect b/Test/dafny4/Bug79.dfy.expect
new file mode 100644
index 00000000..069e7767
--- /dev/null
+++ b/Test/dafny4/Bug79.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
diff --git a/Test/dafny4/Bug81.dfy b/Test/dafny4/Bug81.dfy
new file mode 100644
index 00000000..1992d666
--- /dev/null
+++ b/Test/dafny4/Bug81.dfy
@@ -0,0 +1,9 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function {:opaque} RefineSeqToSeq<T,U>(s:seq<T>, refine_func:T->U) : seq<U>
+ reads refine_func.reads;
+{
+ if |s| == 0 then []
+ else RefineSeqToSeq(s[1..], refine_func)
+}
diff --git a/Test/dafny4/Bug81.dfy.expect b/Test/dafny4/Bug81.dfy.expect
new file mode 100644
index 00000000..069e7767
--- /dev/null
+++ b/Test/dafny4/Bug81.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
diff --git a/Test/dafny4/Bug82.dfy b/Test/dafny4/Bug82.dfy
new file mode 100644
index 00000000..bc7ff321
--- /dev/null
+++ b/Test/dafny4/Bug82.dfy
@@ -0,0 +1,11 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function {:opaque} Reverse(id:int) : int
+
+function RefineToMap(ReverseKey:int->int) : bool
+
+function RefineToMapOfSeqNums() : bool
+{
+ RefineToMap(Reverse)
+}
diff --git a/Test/dafny4/Bug82.dfy.expect b/Test/dafny4/Bug82.dfy.expect
new file mode 100644
index 00000000..73ba063c
--- /dev/null
+++ b/Test/dafny4/Bug82.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 4 verified, 0 errors
diff --git a/Test/dafny4/Bug88.dfy b/Test/dafny4/Bug88.dfy
new file mode 100644
index 00000000..cab2524a
--- /dev/null
+++ b/Test/dafny4/Bug88.dfy
@@ -0,0 +1,18 @@
+// RUN: %dafny "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+lemma T(a: int) returns (b: int)
+ ensures a == b
+{
+ calc {
+ a;
+ }
+}
+
+lemma A(i: int)
+ ensures false
+{
+ if * {
+ } else {
+ }
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug88.dfy.expect b/Test/dafny4/Bug88.dfy.expect
new file mode 100644
index 00000000..3bd22329
--- /dev/null
+++ b/Test/dafny4/Bug88.dfy.expect
@@ -0,0 +1,11 @@
+Bug88.dfy(6,0): Error BP5003: A postcondition might not hold on this return path.
+Bug88.dfy(5,12): Related location: This is the postcondition that might not hold.
+Execution trace:
+ (0,0): anon0
+ Bug88.dfy(7,3): anon2_Else
+Bug88.dfy(14,0): Error BP5003: A postcondition might not hold on this return path.
+Bug88.dfy(13,10): Related location: This is the postcondition that might not hold.
+Execution trace:
+ (0,0): anon0
+
+Dafny program verifier finished with 2 verified, 2 errors
diff --git a/Test/dafny4/Bug89.dfy b/Test/dafny4/Bug89.dfy
new file mode 100644
index 00000000..12aec5f4
--- /dev/null
+++ b/Test/dafny4/Bug89.dfy
@@ -0,0 +1,15 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+method F() returns(x:int)
+ ensures x == 6;
+{
+ x := 5;
+ x := (var y := 1; y + x);
+}
+
+method Main()
+{
+ var x := F();
+ print x;
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug89.dfy.expect b/Test/dafny4/Bug89.dfy.expect
new file mode 100644
index 00000000..5221a5d1
--- /dev/null
+++ b/Test/dafny4/Bug89.dfy.expect
@@ -0,0 +1,6 @@
+
+Dafny program verifier finished with 4 verified, 0 errors
+Program compiled successfully
+Running...
+
+6
\ No newline at end of file
diff --git a/Test/dafny4/Bug91.dfy b/Test/dafny4/Bug91.dfy
new file mode 100644
index 00000000..53e5d5b2
--- /dev/null
+++ b/Test/dafny4/Bug91.dfy
@@ -0,0 +1,40 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+type SendState = map<int, seq<int>>
+
+function UnAckedMessages(s:SendState) : set<int>
+{
+ set m,dst | dst in s && m in s[dst] :: m
+}
+
+function UnAckedMessagesForDst(s:SendState, dst:int) : set<int>
+ requires dst in s;
+{
+ set m | m in s[dst] :: m
+}
+
+function UnAckedMessages3(s:SendState) : set<int>
+{
+ set m,dst | dst in s && m in UnAckedMessagesForDst(s, dst) :: m
+}
+
+function SeqToSet<T>(s:seq<T>) : set<T>
+{
+ set i | i in s
+}
+
+function UnAckedMessages4(s:SendState) : set<int>
+{
+ set m,dst | dst in s && m in SeqToSet(s[dst]) :: m
+}
+
+function UnAckedLists(s:SendState) : set<seq<int>>
+{
+ set dst | dst in s :: s[dst]
+}
+
+function UnAckedMessages5(s:SendState) : set<int>
+{
+ set m, list | list in UnAckedLists(s) && m in list :: m
+}
\ No newline at end of file
diff --git a/Test/dafny4/Bug91.dfy.expect b/Test/dafny4/Bug91.dfy.expect
new file mode 100644
index 00000000..76f19e0d
--- /dev/null
+++ b/Test/dafny4/Bug91.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 7 verified, 0 errors
diff --git a/Test/dafny4/Bug93.dfy b/Test/dafny4/Bug93.dfy
new file mode 100644
index 00000000..5a1dd27f
--- /dev/null
+++ b/Test/dafny4/Bug93.dfy
@@ -0,0 +1,37 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+module Fuel {
+ function FunctionA(x:int) : int
+ {
+ x + 2
+ }
+
+ function FunctionB(y:int) : int
+ {
+ FunctionA(y - 2)
+ }
+
+ method {:fuel FunctionA,0,0} MethodX(z:int)
+ {
+ assert FunctionB(z) == z; // error: Cannot see the body of FunctionA
+ }
+}
+
+module Opaque {
+ function {:opaque} FunctionA(x:int) : int
+ {
+ x + 2
+ }
+
+ function FunctionB(y:int) : int
+ {
+ FunctionA(y - 2)
+ }
+
+ method MethodX(z:int)
+ {
+ assert FunctionB(z) == z;
+ }
+}
+
diff --git a/Test/dafny4/Bug93.dfy.expect b/Test/dafny4/Bug93.dfy.expect
new file mode 100644
index 00000000..d0baf877
--- /dev/null
+++ b/Test/dafny4/Bug93.dfy.expect
@@ -0,0 +1,8 @@
+Bug93.dfy(17,28): Error: assertion violation
+Execution trace:
+ (0,0): anon0
+Bug93.dfy(34,28): Error: assertion violation
+Execution trace:
+ (0,0): anon0
+
+Dafny program verifier finished with 7 verified, 2 errors
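Aside (editor's illustration, not part of this commit): Bug93 contrasts the {:fuel} attribute with {:opaque}. A minimal sketch of the {:opaque}/reveal_ pattern in the Dafny style of this era, with a made-up function Sq:

function {:opaque} Sq(x: int): int
{
  x * x
}

method UseSq()
{
  // With the body of Sq hidden, "assert Sq(3) == 9;" would not verify here.
  reveal_Sq();
  assert Sq(3) == 9;
}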
diff --git a/Test/dafny4/Bug94.dfy b/Test/dafny4/Bug94.dfy
new file mode 100644
index 00000000..2f437785
--- /dev/null
+++ b/Test/dafny4/Bug94.dfy
@@ -0,0 +1,35 @@
+// RUN: %dafny /compile:3 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+function foo() : (int, int)
+{
+ (5, 10)
+}
+
+function bar() : int
+{
+ var (x, y) := foo();
+ x + y
+}
+
+lemma test()
+{
+ var (x, y) := foo();
+}
+
+function method foo2() : (int,int)
+{
+ (5, 10)
+}
+
+method test2()
+{
+ var (x, y) := foo2();
+}
+
+method Main()
+{
+ var (x, y) := foo2();
+ assert (x+y == 15);
+ print(x+y);
+}
diff --git a/Test/dafny4/Bug94.dfy.expect b/Test/dafny4/Bug94.dfy.expect
new file mode 100644
index 00000000..6b337d5a
--- /dev/null
+++ b/Test/dafny4/Bug94.dfy.expect
@@ -0,0 +1,6 @@
+
+Dafny program verifier finished with 9 verified, 0 errors
+Program compiled successfully
+Running...
+
+15
\ No newline at end of file
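Aside (editor's illustration, not part of this commit): Bug94 exercises destructuring a tuple-valued function result with "var (x, y) := ...". The same binding form on a made-up function:

function method MinMax(a: int, b: int): (int, int)
{
  if a <= b then (a, b) else (b, a)
}

method UseMinMax() {
  var (lo, hi) := MinMax(7, 3);
  assert lo == 3 && hi == 7;
}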
diff --git a/Test/dafny4/Bug99.dfy b/Test/dafny4/Bug99.dfy
new file mode 100644
index 00000000..3f95ce9f
--- /dev/null
+++ b/Test/dafny4/Bug99.dfy
@@ -0,0 +1,11 @@
+// RUN: %dafny /autoTriggers:1 /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+predicate P(e:int, p:int) { true }
+predicate Q(i:int, t:int)
+
+lemma Tester(x:int)
+{
+ assert forall i :: Q(i, x) ==> (forall p {:trigger P(i, p)} :: P(i, p));
+
+}
diff --git a/Test/dafny4/Bug99.dfy.expect b/Test/dafny4/Bug99.dfy.expect
new file mode 100644
index 00000000..73ba063c
--- /dev/null
+++ b/Test/dafny4/Bug99.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 4 verified, 0 errors
diff --git a/Test/dafny4/Circ.dfy b/Test/dafny4/Circ.dfy
index e7609195..d110c05c 100644
--- a/Test/dafny4/Circ.dfy
+++ b/Test/dafny4/Circ.dfy
@@ -16,6 +16,7 @@ function zip(a: Stream, b: Stream): Stream { Cons(a.head, zip(b, a.tail)) }
colemma BlinkZipProperty()
ensures zip(zeros(), ones()) == blink();
{
+ BlinkZipProperty();
}
// ----- Thue-Morse sequence -----
@@ -75,6 +76,7 @@ colemma FProperty(s: Stream<Bit>)
// def. zip
Cons(s.head, Cons(not(s).head, zip(s.tail, not(s).tail)));
}
+ FProperty(s.tail);
}
// The fix-point theorem now follows easily.
diff --git a/Test/dafny4/CoqArt-InsertionSort.dfy b/Test/dafny4/CoqArt-InsertionSort.dfy
index efd01537..99e0f0b1 100644
--- a/Test/dafny4/CoqArt-InsertionSort.dfy
+++ b/Test/dafny4/CoqArt-InsertionSort.dfy
@@ -151,6 +151,7 @@ lemma existence_proof(l: List<int>)
{
match l {
case Nil =>
+ assert sorted(Nil);
case Cons(x, m) =>
existence_proof(m);
var m' :| equiv(m, m') && sorted(m');
diff --git a/Test/dafny4/FlyingRobots.dfy b/Test/dafny4/FlyingRobots.dfy
new file mode 100644
index 00000000..c80d310d
--- /dev/null
+++ b/Test/dafny4/FlyingRobots.dfy
@@ -0,0 +1,285 @@
+// RUN: %dafny /compile:3 /dprint:"%t.dprint" /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+// The flying robots example from an F* tutorial. It demonstrates how to specify
+// mutable data structures in the heap.
+
+class Cell {
+ var val:int
+ constructor (v:int)
+ modifies this
+ ensures val == v
+ {
+ val := v;
+ }
+}
+
+class Point {
+ ghost var Value: (int, int, int)
+
+ ghost var Repr: set<object>
+ predicate Valid()
+ reads this, Repr
+ {
+ this in Repr && null !in Repr &&
+ {x,y,z} <= Repr &&
+ x != y && y != z && z != x &&
+ Value == (x.val, y.val, z.val)
+ }
+
+ var x:Cell, y:Cell, z:Cell
+
+ constructor (a:int, b:int, c:int)
+ modifies this
+ ensures Valid() && fresh(Repr - {this})
+ ensures Value == (a, b, c)
+ {
+ x := new Cell(a);
+ y := new Cell(b);
+ z := new Cell(c);
+ Repr := {this};
+ Repr := Repr + {x, y, z};
+ Value := (a, b, c);
+ }
+
+ method Mutate(a:int, b:int, c:int)
+ requires Valid()
+ modifies Repr
+ ensures Valid() && fresh(Repr - old(Repr))
+ ensures Value == (a, b, c)
+ {
+ x.val, y.val, z.val := a, b, c;
+ Value := (a, b, c);
+ }
+}
+
+class Arm {
+ ghost var Value: (int, int)
+
+ ghost var Repr: set<object>
+ predicate Valid()
+ reads this, Repr
+ {
+ this in Repr && null !in Repr &&
+ {polar, azim} <= Repr &&
+ polar != azim &&
+ Value == (polar.val, azim.val)
+ }
+
+ var polar:Cell
+ var azim:Cell
+
+ constructor (polar_in:int, azim_in:int)
+ modifies this
+ ensures Valid() && fresh(Repr - {this})
+ ensures Value == (polar_in, azim_in)
+ {
+ polar := new Cell(polar_in);
+ azim := new Cell(azim_in);
+ Repr := {this};
+ Repr := Repr + {polar, azim};
+ Value := (polar_in, azim_in);
+ }
+
+ method Mutate(polar_in:int, azim_in:int)
+ requires Valid()
+ modifies Repr
+ ensures Valid() && fresh(Repr - old(Repr))
+ ensures Value == (polar_in, azim_in)
+ {
+ polar.val, azim.val := polar_in, azim_in;
+ Value := (polar_in, azim_in);
+ }
+}
+
+class Bot {
+ ghost var Repr: set<object>
+ predicate {:opaque} Valid()
+ reads this, Repr
+ ensures Valid() ==> this in Repr && null !in Repr
+ {
+ this in Repr && null !in Repr &&
+ pos in Repr && {left, right} <= Repr &&
+ left != right &&
+ pos.Repr <= Repr && left.Repr <= Repr && right.Repr <= Repr &&
+ pos.Repr !! left.Repr !! right.Repr &&
+ pos.Valid() && left.Valid() && right.Valid()
+ }
+
+ var pos:Point
+ var left:Arm
+ var right:Arm
+
+ constructor ()
+ modifies this
+ ensures Valid() && fresh(Repr - {this})
+ {
+ pos := new Point(0, 0, 0);
+ left := new Arm(0, 0);
+ right := new Arm(0, 0);
+ Repr := {this};
+ Repr := Repr + pos.Repr + left.Repr + right.Repr;
+ reveal_Valid();
+ }
+
+ predicate flying()
+ requires (reveal_Valid(); Valid())
+ reads Repr
+ {
+ pos.z.val > 0
+ }
+
+ predicate arms_up()
+ requires (reveal_Valid(); Valid())
+ reads Repr
+ {
+ left.polar.val == right.polar.val == 0
+ }
+
+ predicate robot_inv()
+ requires (reveal_Valid(); Valid())
+ reads Repr
+ {
+ flying() ==> arms_up()
+ }
+
+ method Fly()
+ requires Valid()
+ modifies Repr
+ ensures Valid() && fresh(Repr - old(Repr))
+ ensures robot_inv() && flying()
+ {
+ reveal_Valid();
+ left.polar.val, right.polar.val := 0, 0;
+ pos.z.val := 100;
+ right.azim.val := 17;
+ pos.Value := (pos.Value.0, pos.Value.1, 100);
+ left.Value, right.Value := (0, left.Value.1), (0, 17);
+ reveal_Valid();
+ }
+}
+
+// This method tests that Fly operates independently on disjoint robots
+method FlyRobots(b0:Bot, b1:Bot)
+ requires b0 != null && b0.Valid()
+ requires b1 != null && b1.Valid()
+ requires b0 != b1 ==> b0.Repr !! b1.Repr
+ modifies b0.Repr, b1.Repr
+ ensures b0.Valid() && fresh(b0.Repr - old(b0.Repr))
+ ensures b1.Valid() && fresh(b1.Repr - old(b1.Repr))
+ ensures b0 != b1 ==> b0.Repr !! b1.Repr
+ ensures b0.robot_inv() && b1.robot_inv()
+ ensures b0.flying() && b1.flying()
+{
+ b0.Fly();
+ b1.Fly();
+}
+
+// ----- robot armies ----------
+
+// The union of .Repr for the robots in "bots"
+function ArmyRepr(bots:seq<Bot>) : set<object>
+ reads set b | b in bots
+{
+ set b,o | b in bots && b != null && o in b.Repr :: o
+}
+
+// An army is a sequence of disjoint, valid robots
+predicate ValidArmy(bots:seq<Bot>)
+ reads set b | b in bots
+ reads ArmyRepr(bots)
+{
+ (forall i :: 0 <= i < |bots| ==> bots[i] != null && bots[i].Valid())
+ && (forall i,j :: 0 <= i < j < |bots| ==> bots[i].Repr !! bots[j].Repr)
+}
+
+method FlyRobotArmy(bots:seq<Bot>)
+ requires ValidArmy(bots)
+ modifies ArmyRepr(bots)
+ ensures ValidArmy(bots) && fresh(ArmyRepr(bots) - old(ArmyRepr(bots)))
+ ensures forall b :: b in bots ==> b.Valid() && b.robot_inv() && b.flying()
+{
+ if * {
+ // fly recursively
+ FlyRobotArmy_Recursively(bots);
+ } else {
+ // fly iteratively
+ var n := 0;
+ while n < |bots|
+ invariant 0 <= n <= |bots|
+ invariant ValidArmy(bots)
+ invariant forall j :: 0 <= j < n ==> bots[j].Valid() && bots[j].robot_inv() && bots[j].flying()
+ invariant forall i :: 0 <= i < |bots| ==> fresh(bots[i].Repr - old(bots[i].Repr))
+ {
+ FlyOne(bots, n);
+ n := n + 1;
+ }
+ }
+}
+
+method FlyRobotArmy_Recursively(bots:seq<Bot>)
+ requires ValidArmy(bots)
+ modifies ArmyRepr(bots)
+ ensures ValidArmy(bots)
+ ensures forall i :: 0 <= i < |bots| ==> fresh(bots[i].Repr - old(bots[i].Repr))
+ ensures forall b :: b in bots ==> b.robot_inv() && b.flying()
+{
+ if bots != [] {
+ FlyOne(bots, 0);
+ FlyRobotArmy_Recursively(bots[1..]);
+ }
+}
+
+// This method is intended to be called in each loop iteration of FlyRobotArmy
+method FlyOne(bots:seq<Bot>, n:int)
+ requires 0 <= n < |bots|
+ requires forall j :: 0 <= j < |bots| ==> bots[j] != null && bots[j].Valid()
+ requires forall i,j :: 0 <= i < j < |bots| ==> bots[i].Repr !! bots[j].Repr
+ requires forall j :: 0 <= j < n ==> bots[j].robot_inv() && bots[j].flying()
+ modifies bots[n].Repr
+ ensures forall j :: 0 <= j < |bots| ==> bots[j].Valid()
+ ensures fresh(bots[n].Repr - old(bots[n].Repr))
+ ensures bots[n].robot_inv() && bots[n].flying()
+ ensures forall j :: 0 <= j < |bots| && j != n ==> bots[j].Repr == old(bots[j].Repr)
+ ensures forall j :: 0 <= j < n ==> bots[j].robot_inv() && bots[j].flying()
+{
+ bots[n].Fly();
+}
+
+// This method makes sure FlyRobotArmy can be called, and then called again on the same army
+method FormArmy(b0:Bot, b1:Bot, b2:Bot)
+ requires null !in {b0, b1, b2}
+ requires b0.Valid() && b1.Valid() && b2.Valid()
+ requires b0.Repr !! b1.Repr !! b2.Repr
+ modifies b0.Repr, b1.Repr, b2.Repr
+ ensures b0.Valid() && b1.Valid() && b2.Valid()
+ ensures b0.Repr !! b1.Repr !! b2.Repr
+ ensures fresh(b0.Repr + b1.Repr + b2.Repr - old(b0.Repr + b1.Repr + b2.Repr))
+{
+ var army := [b0, b1, b2];
+ ArmyRepr3(army);
+ FlyRobotArmy(army);
+ FlyRobotArmy(army); // do it again
+ ArmyRepr3(army);
+}
+
+lemma ArmyRepr3(army:seq<Bot>)
+ requires null !in army && |army| == 3
+ ensures ArmyRepr(army) == army[0].Repr + army[1].Repr + army[2].Repr
+{
+}
+
+// ----- Make sure everything is callable ----------
+
+method Main()
+{
+ var b0 := new Bot();
+ var b1 := new Bot();
+ FlyRobots(b0, b1);
+ FlyRobots(b0, b1);
+ FlyRobots(b1, b0);
+
+ var b2 := new Bot();
+ FormArmy(b0, b1, b2);
+ FormArmy(b2, b0, b1);
+}
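
The FlyingRobots test above is built around Dafny's dynamic-frames idiom: each object keeps a ghost Repr set, Valid() ties the concrete fields to Repr, and every constructor and mutator promises Valid() && fresh(Repr - old(Repr)) so that callers can keep separate objects disjoint. A minimal sketch of the idiom, using a hypothetical Counter class that is not part of the test suite:

class Counter {
  ghost var Repr: set<object>
  var n: int

  // The object's invariant: it belongs to its own representation set.
  predicate Valid()
    reads this, Repr
  {
    this in Repr
  }

  constructor ()
    modifies this
    ensures Valid() && fresh(Repr - {this})
  {
    n := 0;
    Repr := {this};
  }

  method Inc()
    requires Valid()
    modifies Repr
    ensures Valid() && fresh(Repr - old(Repr))
    ensures n == old(n) + 1
  {
    n := n + 1;
  }
}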
diff --git a/Test/dafny4/FlyingRobots.dfy.expect b/Test/dafny4/FlyingRobots.dfy.expect
new file mode 100644
index 00000000..99cdc3cb
--- /dev/null
+++ b/Test/dafny4/FlyingRobots.dfy.expect
@@ -0,0 +1,5 @@
+
+Dafny program verifier finished with 37 verified, 0 errors
+Program compiled successfully
+Running...
+
diff --git a/Test/dafny4/Fstar-QuickSort.dfy b/Test/dafny4/Fstar-QuickSort.dfy
index 4c5bc09b..d895ccf4 100644
--- a/Test/dafny4/Fstar-QuickSort.dfy
+++ b/Test/dafny4/Fstar-QuickSort.dfy
@@ -6,8 +6,6 @@
// Dafny needs help with a couple of lemmas in places where F* does not need them.
// Comments below show differences between the F* and Dafny versions.
-datatype Pair<T, U> = P(T, U)
-
datatype List<T> = Nil | Cons(T, List)
function length(list: List): nat // for termination proof
@@ -26,7 +24,7 @@ function In(x: int, list: List<int>): nat
}
predicate SortedRange(m: int, n: int, list: List<int>)
- decreases list; // for termination proof
+ decreases list // for termination proof
{
match list
case Nil => m <= n
@@ -34,45 +32,45 @@ predicate SortedRange(m: int, n: int, list: List<int>)
}
function append(n0: int, n1: int, n2: int, n3: int, i: List<int>, j: List<int>): List<int>
- requires n0 <= n1 <= n2 <= n3;
- requires SortedRange(n0, n1, i) && SortedRange(n2, n3, j);
- ensures SortedRange(n0, n3, append(n0, n1, n2, n3, i, j));
- ensures forall x :: In(x, append(n0, n1, n2, n3, i, j)) == In(x, i) + In(x, j);
- decreases i; // for termination proof
+ requires n0 <= n1 <= n2 <= n3
+ requires SortedRange(n0, n1, i) && SortedRange(n2, n3, j)
+ ensures SortedRange(n0, n3, append(n0, n1, n2, n3, i, j))
+ ensures forall x :: In(x, append(n0, n1, n2, n3, i, j)) == In(x, i) + In(x, j)
+ decreases i // for termination proof
{
match i
case Nil => j
case Cons(hd, tl) => Cons(hd, append(hd, n1, n2, n3, tl, j))
}
-function partition(x: int, l: List<int>): Pair<List<int>, List<int>>
- ensures var P(lo, hi) := partition(x, l);
+function partition(x: int, l: List<int>): (List<int>, List<int>)
+ ensures var (lo, hi) := partition(x, l);
(forall y :: In(y, lo) == if y <= x then In(y, l) else 0) &&
(forall y :: In(y, hi) == if x < y then In(y, l) else 0) &&
- length(l) == length(lo) + length(hi); // for termination proof
+ length(l) == length(lo) + length(hi) // for termination proof
{
match l
- case Nil => P(Nil, Nil)
+ case Nil => (Nil, Nil)
case Cons(hd, tl) =>
- var P(lo, hi) := partition(x, tl);
+ var (lo, hi) := partition(x, tl);
if hd <= x then
- P(Cons(hd, lo), hi)
+ (Cons(hd, lo), hi)
else
- P(lo, Cons(hd, hi))
+ (lo, Cons(hd, hi))
}
function sort(min: int, max: int, i: List<int>): List<int>
- requires min <= max;
- requires forall x :: In(x, i) != 0 ==> min <= x <= max;
- ensures SortedRange(min, max, sort(min, max, i));
- ensures forall x :: In(x, i) == In(x, sort(min, max, i));
- decreases length(i); // for termination proof
+ requires min <= max
+ requires forall x :: In(x, i) != 0 ==> min <= x <= max
+ ensures SortedRange(min, max, sort(min, max, i))
+ ensures forall x :: In(x, i) == In(x, sort(min, max, i))
+ decreases length(i) // for termination proof
{
match i
case Nil => Nil
case Cons(hd, tl) =>
assert In(hd, i) != 0; // this proof line not needed in F*
- var P(lo, hi) := partition(hd, tl);
+ var (lo, hi) := partition(hd, tl);
assert forall y :: In(y, lo) <= In(y, i); // this proof line not needed in F*
var i' := sort(min, hd, lo);
var j' := sort(hd, max, hi);
diff --git a/Test/dafny4/GHC-MergeSort.dfy b/Test/dafny4/GHC-MergeSort.dfy
index e06773eb..24903d87 100644
--- a/Test/dafny4/GHC-MergeSort.dfy
+++ b/Test/dafny4/GHC-MergeSort.dfy
@@ -1,4 +1,4 @@
-// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
+// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// Rustan Leino
@@ -412,11 +412,8 @@ lemma sorted_replaceSuffix(xs: List<G>, ys: List<G>, zs: List<G>)
match xs {
case Nil =>
case Cons(c, xs') =>
- forall a,b | a in multiset_of(xs') && b in multiset_of(Cons(c, zs))
- ensures Below(a, b);
- {
- sorted_reverse(xs', Cons(c, ys));
- }
+ sorted_reverse(xs, ys);
+ sorted_reverse(xs', Cons(c, ys));
sorted_replaceSuffix(xs', Cons(c, ys), Cons(c, zs));
}
}
diff --git a/Test/dafny4/KozenSilva.dfy b/Test/dafny4/KozenSilva.dfy
index af0cdc71..ef49b10f 100644
--- a/Test/dafny4/KozenSilva.dfy
+++ b/Test/dafny4/KozenSilva.dfy
@@ -25,8 +25,8 @@ copredicate LexLess(s: Stream<int>, t: Stream<int>)
// A co-lemma is used to establish the truth of a co-predicate.
colemma Theorem1_LexLess_Is_Transitive(s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires LexLess(s, t) && LexLess(t, u);
- ensures LexLess(s, u);
+ requires LexLess(s, t) && LexLess(t, u)
+ ensures LexLess(s, u)
{
// Here is the proof, which is actually a body of code. It lends itself to a
// simple, intuitive co-inductive reading. For a theorem this simple, this simple
@@ -38,6 +38,14 @@ colemma Theorem1_LexLess_Is_Transitive(s: Stream<int>, t: Stream<int>, u: Stream
}
}
+// Actually, Dafny can do the proof of the previous lemma completely automatically. Here it is:
+colemma Theorem1_LexLess_Is_Transitive_Automatic(s: Stream<int>, t: Stream<int>, u: Stream<int>)
+ requires LexLess(s, t) && LexLess(t, u)
+ ensures LexLess(s, u)
+{
+ // no manual proof needed, so the body of the co-lemma is empty
+}
+
// The following predicate captures the (inductively defined) negation of (the
// co-inductively defined) LexLess above.
predicate NotLexLess(s: Stream<int>, t: Stream<int>)
@@ -51,7 +59,7 @@ predicate NotLexLess'(k: nat, s: Stream<int>, t: Stream<int>)
}
lemma EquivalenceTheorem(s: Stream<int>, t: Stream<int>)
- ensures LexLess(s, t) <==> !NotLexLess(s, t);
+ ensures LexLess(s, t) <==> !NotLexLess(s, t)
{
if !NotLexLess(s, t) {
EquivalenceTheorem0(s, t);
@@ -61,8 +69,8 @@ lemma EquivalenceTheorem(s: Stream<int>, t: Stream<int>)
}
}
colemma EquivalenceTheorem0(s: Stream<int>, t: Stream<int>)
- requires !NotLexLess(s, t);
- ensures LexLess(s, t);
+ requires !NotLexLess(s, t)
+ ensures LexLess(s, t)
{
// Here, more needs to be said about the way Dafny handles co-lemmas.
// The way a co-lemma establishes a co-predicate is to prove, by induction,
@@ -74,14 +82,14 @@ colemma EquivalenceTheorem0(s: Stream<int>, t: Stream<int>)
// indicates a finite unrolling of a co-inductive predicate. In particular,
// LexLess#[k] refers to k unrollings of LexLess.
lemma EquivalenceTheorem0_Lemma(k: nat, s: Stream<int>, t: Stream<int>)
- requires !NotLexLess'(k, s, t);
- ensures LexLess#[k](s, t);
+ requires !NotLexLess'(k, s, t)
+ ensures LexLess#[k](s, t)
{
// This simple inductive proof is done completely automatically by Dafny.
}
lemma EquivalenceTheorem1(s: Stream<int>, t: Stream<int>)
- requires LexLess(s, t);
- ensures !NotLexLess(s, t);
+ requires LexLess(s, t)
+ ensures !NotLexLess(s, t)
{
// The forall statement in Dafny is used, here, as universal introduction:
// what EquivalenceTheorem1_Lemma establishes for one k, the forall
@@ -91,22 +99,22 @@ lemma EquivalenceTheorem1(s: Stream<int>, t: Stream<int>)
}
}
lemma EquivalenceTheorem1_Lemma(k: nat, s: Stream<int>, t: Stream<int>)
- requires LexLess(s, t);
- ensures !NotLexLess'(k, s, t);
+ requires LexLess(s, t)
+ ensures !NotLexLess'(k, s, t)
{
}
lemma Theorem1_Alt(s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires NotLexLess(s, u);
- ensures NotLexLess(s, t) || NotLexLess(t, u);
+ requires NotLexLess(s, u)
+ ensures NotLexLess(s, t) || NotLexLess(t, u)
{
forall k: nat | NotLexLess'(k, s, u) {
Theorem1_Alt_Lemma(k, s, t, u);
}
}
lemma Theorem1_Alt_Lemma(k: nat, s: Stream<int>, t: Stream<int>, u: Stream<int>)
- requires NotLexLess'(k, s, u);
- ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u);
+ requires NotLexLess'(k, s, u)
+ ensures NotLexLess'(k, s, t) || NotLexLess'(k, t, u)
{
}
@@ -116,8 +124,8 @@ function PointwiseAdd(s: Stream<int>, t: Stream<int>): Stream<int>
}
colemma Theorem2_Pointwise_Addition_Is_Monotone(s: Stream<int>, t: Stream<int>, u: Stream<int>, v: Stream<int>)
- requires LexLess(s, t) && LexLess(u, v);
- ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v));
+ requires LexLess(s, t) && LexLess(u, v)
+ ensures LexLess(PointwiseAdd(s, u), PointwiseAdd(t, v))
{
// The co-lemma will establish the co-inductive predicate by establishing
// all finite unrollings thereof. Each finite unrolling is proved by
@@ -148,15 +156,9 @@ copredicate Subtype(a: RecType, b: RecType)
}
colemma Theorem3_Subtype_Is_Transitive(a: RecType, b: RecType, c: RecType)
- requires Subtype(a, b) && Subtype(b, c);
- ensures Subtype(a, c);
-{
- if a == Bottom || c == Top {
- // done
- } else {
- Theorem3_Subtype_Is_Transitive(c.dom, b.dom, a.dom);
- Theorem3_Subtype_Is_Transitive(a.ran, b.ran, c.ran);
- }
+ requires Subtype(a, b) && Subtype(b, c)
+ ensures Subtype(a, c)
+{
}
// --------------------------------------------------------------------------
@@ -184,16 +186,16 @@ copredicate ValBelow(u: Val, v: Val)
}
colemma Theorem4a_ClEnvBelow_Is_Transitive(c: ClEnv, d: ClEnv, e: ClEnv)
- requires ClEnvBelow(c, d) && ClEnvBelow(d, e);
- ensures ClEnvBelow(c, e);
+ requires ClEnvBelow(c, d) && ClEnvBelow(d, e)
+ ensures ClEnvBelow(c, e)
{
forall y | y in c.m {
Theorem4b_ValBelow_Is_Transitive#[_k-1](c.m[y], d.m[y], e.m[y]);
}
}
colemma Theorem4b_ValBelow_Is_Transitive(u: Val, v: Val, w: Val)
- requires ValBelow(u, v) && ValBelow(v, w);
- ensures ValBelow(u, w);
+ requires ValBelow(u, v) && ValBelow(v, w)
+ ensures ValBelow(u, w)
{
if u.ValCl? {
Theorem4a_ClEnvBelow_Is_Transitive(u.cl.env, v.cl.env, w.cl.env);
@@ -209,7 +211,7 @@ predicate IsCapsule(cap: Capsule)
}
function ClosureConversion(cap: Capsule): Cl
- requires IsCapsule(cap);
+ requires IsCapsule(cap)
{
Closure(cap.e.abs, ClosureConvertedMap(cap.s))
// In the Kozen and Silva paper, there are more conditions, having to do with free variables,
@@ -229,8 +231,8 @@ predicate CapsuleEnvironmentBelow(s: map<Var, ConstOrAbs>, t: map<Var, ConstOrAb
}
colemma Theorem5_ClosureConversion_Is_Monotone(s: map<Var, ConstOrAbs>, t: map<Var, ConstOrAbs>)
- requires CapsuleEnvironmentBelow(s, t);
- ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t));
+ requires CapsuleEnvironmentBelow(s, t)
+ ensures ClEnvBelow(ClosureConvertedMap(s), ClosureConvertedMap(t))
{
}
@@ -251,8 +253,8 @@ copredicate Bisim(s: Stream, t: Stream)
}
colemma Theorem6_Bisim_Is_Symmetric(s: Stream, t: Stream)
- requires Bisim(s, t);
- ensures Bisim(t, s);
+ requires Bisim(s, t)
+ ensures Bisim(t, s)
{
// proof is automatic
}
@@ -270,8 +272,8 @@ function merge(s: Stream, t: Stream): Stream
// In general, the termination argument needs to be supplied explicitly in terms
// of a metric, rank, variant function, or whatever you want to call it--a
// "decreases" clause in Dafny. Dafny provides some help in making up "decreases"
-// clauses, and in this case it automatically adds "decreases 0;" to SplitLeft
-// and "decreases 1;" to SplitRight. With these "decreases" clauses, the
+// clauses, and in this case it automatically adds "decreases 0" to SplitLeft
+// and "decreases 1" to SplitRight. With these "decreases" clauses, the
// termination check of SplitRight's call to SplitLeft will simply be "0 < 1",
// which is trivial to check.
function SplitLeft(s: Stream): Stream
@@ -284,7 +286,7 @@ function SplitRight(s: Stream): Stream
}
colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream)
- ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s);
+ ensures Bisim(merge(SplitLeft(s), SplitRight(s)), s)
{
var LHS := merge(SplitLeft(s), SplitRight(s));
// The construct that follows is a "calc" statement. It gives a way to write an
@@ -318,7 +320,7 @@ colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Bisim(s: Stream)
}
colemma Theorem7_Merge_Is_Left_Inverse_Of_Split_Equal(s: Stream)
- ensures merge(SplitLeft(s), SplitRight(s)) == s;
+ ensures merge(SplitLeft(s), SplitRight(s)) == s
{
// The proof of this co-lemma is actually done completely automatically (so the
// body of this co-lemma can be empty). However, just to show what the calculations
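
The proofs above all follow one pattern: a copredicate is established by a colemma whose (possibly empty) body is reinterpreted as a proof of every finite unrolling, such as LexLess#[k]. A minimal sketch of that pattern, with hypothetical names (IStream, Repeat, AlwaysEqual) that do not appear in the test:

codatatype IStream = ICons(head: int, tail: IStream)

function Repeat(x: int): IStream
{
  ICons(x, Repeat(x))
}

copredicate AlwaysEqual(s: IStream, x: int)
{
  s.head == x && AlwaysEqual(s.tail, x)
}

colemma RepeatIsAlwaysEqual(x: int)
  ensures AlwaysEqual(Repeat(x), x)
{
  // The co-recursive call plays the role of the coinductive hypothesis;
  // Dafny turns it into a call of the prefix lemma at depth _k - 1.
  RepeatIsAlwaysEqual(x);
}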
diff --git a/Test/dafny4/KozenSilva.dfy.expect b/Test/dafny4/KozenSilva.dfy.expect
index c6c90498..90432af3 100644
--- a/Test/dafny4/KozenSilva.dfy.expect
+++ b/Test/dafny4/KozenSilva.dfy.expect
@@ -1,2 +1,2 @@
-Dafny program verifier finished with 47 verified, 0 errors
+Dafny program verifier finished with 49 verified, 0 errors
diff --git a/Test/dafny4/LargeConstants.dfy b/Test/dafny4/LargeConstants.dfy
new file mode 100644
index 00000000..18435c30
--- /dev/null
+++ b/Test/dafny4/LargeConstants.dfy
@@ -0,0 +1,14 @@
+// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+lemma largeIsLarge()
+ ensures 0x8000000000000000 > 0 {
+}
+
+lemma SmallIsSmall()
+ ensures -0x8000000000000000 < 0 {
+}
+
+lemma ShouldCancelOut()
+ ensures -0x8000000000000000 + 0x8000000000000000 == 0 {
+}
diff --git a/Test/dafny4/LargeConstants.dfy.expect b/Test/dafny4/LargeConstants.dfy.expect
new file mode 100644
index 00000000..4ef2de53
--- /dev/null
+++ b/Test/dafny4/LargeConstants.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 6 verified, 0 errors
diff --git a/Test/dafny4/Leq.dfy b/Test/dafny4/Leq.dfy
new file mode 100644
index 00000000..0491dd00
--- /dev/null
+++ b/Test/dafny4/Leq.dfy
@@ -0,0 +1,174 @@
+// RUN: %dafny /rprint:"%t.rprint" "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+// Rustan Leino, 22 Sep 2015.
+// This file considers two definitions of Leq on naturals+infinity. One
+// definition uses the least fixpoint, the other the greatest fixpoint.
+
+// Nat represents natural numbers extended with infinity
+codatatype Nat = Z | S(pred: Nat)
+
+function Num(n: nat): Nat
+{
+ if n == 0 then Z else S(Num(n-1))
+}
+
+predicate IsFinite(a: Nat)
+{
+ exists m:nat :: a == Num(m)
+}
+
+copredicate IsInfinity(a: Nat)
+{
+ a.S? && IsInfinity(a.pred)
+}
+
+lemma NatCases(a: Nat)
+ ensures IsFinite(a) || IsInfinity(a)
+{
+ if IsFinite(a) {
+ } else {
+ NatCasesAux(a);
+ }
+}
+colemma NatCasesAux(a: Nat)
+ requires !IsFinite(a)
+ ensures IsInfinity(a)
+{
+ assert a != Num(0);
+ if IsFinite(a.pred) {
+ // going for a contradiction
+ var m:nat :| a.pred == Num(m);
+ assert a == Num(m+1);
+ assert false; // the case is absurd
+ }
+ NatCasesAux(a.pred);
+}
+
+// ----------- inductive semantics (more precisely, a least-fixpoint definition of Leq)
+
+inductive predicate Leq(a: Nat, b: Nat)
+{
+ a == Z ||
+ (a.S? && b.S? && Leq(a.pred, b.pred))
+}
+
+lemma LeqTheorem(a: Nat, b: Nat)
+ ensures Leq(a, b) <==>
+ exists m:nat :: a == Num(m) &&
+ (IsInfinity(b) || exists n:nat :: b == Num(n) && m <= n)
+{
+ if exists m:nat,n:nat :: a == Num(m) && b == Num(n) && m <= n {
+ var m:nat,n:nat :| a == Num(m) && b == Num(n) && m <= n;
+ Leq0_finite(m, n);
+ }
+ if (exists m:nat :: a == Num(m)) && IsInfinity(b) {
+ var m:nat :| a == Num(m);
+ Leq0_infinite(m, b);
+ }
+ if Leq(a, b) {
+ var k:nat :| Leq#[k](a, b);
+ var m, n := Leq1(k, a, b);
+ }
+}
+
+lemma Leq0_finite(m: nat, n: nat)
+ requires m <= n
+ ensures Leq(Num(m), Num(n))
+{
+ // proof is automatic
+}
+
+lemma Leq0_infinite(m: nat, b: Nat)
+ requires IsInfinity(b)
+ ensures Leq(Num(m), b)
+{
+ // proof is automatic
+}
+
+lemma Leq1(k: nat, a: Nat, b: Nat) returns (m: nat, n: nat)
+ requires Leq#[k](a, b)
+ ensures a == Num(m)
+ ensures IsInfinity(b) || (b == Num(n) && m <= n)
+{
+ if a == Z {
+ m := 0;
+ NatCases(b);
+ if !IsInfinity(b) {
+ n :| b == Num(n);
+ }
+ } else {
+ assert a.S? && b.S? && Leq(a.pred, b.pred);
+ m,n := Leq1(k-1, a.pred, b.pred);
+ m, n := m + 1, n + 1;
+ }
+}
+
+// ----------- co-inductive semantics (more precisely, a greatest-fixpoint definition of Leq)
+
+copredicate CoLeq(a: Nat, b: Nat)
+{
+ a == Z ||
+ (a.S? && b.S? && CoLeq(a.pred, b.pred))
+}
+
+lemma CoLeqTheorem(a: Nat, b: Nat)
+ ensures CoLeq(a, b) <==>
+ IsInfinity(b) ||
+ exists m:nat,n:nat :: a == Num(m) && b == Num(n) && m <= n
+{
+ if IsInfinity(b) {
+ CoLeq0_infinite(a, b);
+ }
+ if exists m:nat,n:nat :: a == Num(m) && b == Num(n) && m <= n {
+ var m:nat,n:nat :| a == Num(m) && b == Num(n) && m <= n;
+ CoLeq0_finite(m, n);
+ }
+ if CoLeq(a, b) {
+ CoLeq1(a, b);
+ }
+}
+
+lemma CoLeq0_finite(m: nat, n: nat)
+ requires m <= n
+ ensures CoLeq(Num(m), Num(n))
+{
+ // proof is automatic
+}
+
+colemma CoLeq0_infinite(a: Nat, b: Nat)
+ requires IsInfinity(b)
+ ensures CoLeq(a, b)
+{
+ // proof is automatic
+}
+
+lemma CoLeq1(a: Nat, b: Nat)
+ requires CoLeq(a, b)
+ ensures IsInfinity(b) || exists m:nat,n:nat :: a == Num(m) && b == Num(n) && m <= n
+{
+ var m,n := CoLeq1'(a, b);
+}
+
+lemma CoLeq1'(a: Nat, b: Nat) returns (m: nat, n: nat)
+ requires CoLeq(a, b)
+ ensures IsInfinity(b) || (a == Num(m) && b == Num(n) && m <= n)
+{
+ if !IsInfinity(b) {
+ NatCases(b);
+ n :| b == Num(n);
+ m := CoLeq1Aux(a, n);
+ }
+}
+
+lemma CoLeq1Aux(a: Nat, n: nat) returns (m: nat)
+ requires CoLeq(a, Num(n))
+ ensures a == Num(m) && m <= n
+{
+ if a == Z {
+ m := 0;
+ } else {
+ m := CoLeq1Aux(a.pred, n-1);
+ m := m + 1;
+ }
+}
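
Leq.dfy contrasts the least fixpoint (the inductive predicate Leq) with the greatest fixpoint (the copredicate CoLeq) of the same defining equation. A hypothetical fragment (not part of the test) showing where they come apart: the infinite numeral is CoLeq-below itself, while Leq never relates it to anything, because Leq demands that its first argument reach Z in finitely many steps.

function Inf(): Nat
{
  S(Inf())
}

colemma InfCoLeqInf()
  ensures CoLeq(Inf(), Inf())
{
  // The co-recursive call acts as the coinductive hypothesis at depth _k - 1.
  InfCoLeqInf();
}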
diff --git a/Test/dafny4/Leq.dfy.expect b/Test/dafny4/Leq.dfy.expect
new file mode 100644
index 00000000..754c1e55
--- /dev/null
+++ b/Test/dafny4/Leq.dfy.expect
@@ -0,0 +1,3 @@
+
+Dafny program verifier finished with 29 verified, 0 errors
+Compiled assembly into Leq.dll
diff --git a/Test/dafny4/McCarthy91.dfy b/Test/dafny4/McCarthy91.dfy
new file mode 100644
index 00000000..2e599f0d
--- /dev/null
+++ b/Test/dafny4/McCarthy91.dfy
@@ -0,0 +1,86 @@
+// RUN: %dafny /compile:3 /rprint:"%t.rprint" /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+// The usual recursive method for computing McCarthy's 91 function
+
+method Main() {
+ var s := [3, 99, 100, 101, 1013];
+
+ var n := 0;
+ while n < |s| {
+ var m := M(s[n]);
+ print "M(", s[n], ") = ", m, "\n";
+ n := n + 1;
+ }
+
+ n := 0;
+ while n < |s| {
+ print "mc91(", s[n], ") = ", mc91(s[n]), "\n";
+ n := n + 1;
+ }
+
+ n := 0;
+ while n < |s| {
+ var m := Mc91(s[n]);
+ print "Mc91(", s[n], ") = ", m, "\n";
+ n := n + 1;
+ }
+
+ n := 0;
+ while n < 5 {
+ var m := iter(n, mc91, 40);
+ print "iter(", n, ", mc91, 40) = ", m, "\n";
+ n := n + 1;
+ }
+}
+
+method M(n: int) returns (r: int)
+ ensures r == if n <= 100 then 91 else n - 10
+ decreases 100 - n
+{
+ if n <= 100 {
+ r := M(n + 11);
+ r := M(r);
+ } else {
+ r := n - 10;
+ }
+}
+
+// Same as above, but as a function
+
+function method mc91(n: int): int
+ ensures n <= 100 ==> mc91(n) == 91
+ decreases 100 - n
+{
+ if n <= 100 then
+ mc91(mc91(n + 11))
+ else
+ n - 10
+}
+
+// Iterating a function f e times starting from n
+
+function method iter(e: nat, f: int -> int, n: int): int
+ requires forall x :: f.requires(x) && f.reads(x) == {}
+{
+ if e == 0 then n else iter(e-1, f, f(n))
+}
+
+// Iterative version of McCarthy's 91 function, following in lockstep
+// what the recursive version would do
+
+method Mc91(n0: int) returns (r: int)
+ ensures r == mc91(n0)
+{
+ var e, n := 1, n0;
+ while e > 0
+ invariant iter(e, mc91, n) == mc91(n0)
+ decreases 100 - n + 10 * e, e
+ {
+ if n <= 100 {
+ e, n := e+1, n+11;
+ } else {
+ e, n := e-1, n-10;
+ }
+ }
+ return n;
+}
diff --git a/Test/dafny4/McCarthy91.dfy.expect b/Test/dafny4/McCarthy91.dfy.expect
new file mode 100644
index 00000000..bbc91c35
--- /dev/null
+++ b/Test/dafny4/McCarthy91.dfy.expect
@@ -0,0 +1,25 @@
+
+Dafny program verifier finished with 8 verified, 0 errors
+Program compiled successfully
+Running...
+
+M(3) = 91
+M(99) = 91
+M(100) = 91
+M(101) = 91
+M(1013) = 1003
+mc91(3) = 91
+mc91(99) = 91
+mc91(100) = 91
+mc91(101) = 91
+mc91(1013) = 1003
+Mc91(3) = 91
+Mc91(99) = 91
+Mc91(100) = 91
+Mc91(101) = 91
+Mc91(1013) = 1003
+iter(0, mc91, 40) = 40
+iter(1, mc91, 40) = 91
+iter(2, mc91, 40) = 91
+iter(3, mc91, 40) = 91
+iter(4, mc91, 40) = 91
diff --git a/Test/dafny4/MonadicLaws.dfy b/Test/dafny4/MonadicLaws.dfy
new file mode 100644
index 00000000..b668ffb8
--- /dev/null
+++ b/Test/dafny4/MonadicLaws.dfy
@@ -0,0 +1,100 @@
+// RUN: %dafny /compile:0 /rprint:"%t.rprint" /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+// Monadic Laws
+// Niki Vazou and Rustan Leino
+// 28 March 2016
+
+datatype List<T> = Nil | Cons(head: T, tail: List)
+
+predicate Total<T,U>(p: T -> U)
+ reads p.reads
+{
+ forall x :: p.reads(x) == {} && p.requires(x)
+}
+
+function append(xs: List, ys: List): List
+{
+ match xs
+ case Nil => ys
+ case Cons(x, xs') => Cons(x, append(xs', ys))
+}
+
+lemma AppendNil(xs: List)
+ ensures append(xs, Nil) == xs
+{
+}
+
+lemma AppendAssoc(xs: List, ys: List, zs: List)
+ ensures append(append(xs, ys), zs) == append(xs, append(ys, zs));
+{
+}
+
+function Return<T>(a: T): List
+{
+ Cons(a, Nil)
+}
+
+function Bind<T,U>(xs: List<T>, f: T -> List<U>): List<U>
+ requires Total(f)
+{
+ match xs
+ case Nil => Nil
+ case Cons(x, xs') => append(f(x), Bind(xs', f))
+}
+
+lemma LeftIdentity<T>(a: T, f: T -> List)
+ requires Total(f)
+ ensures Bind(Return(a), f) == f(a)
+{
+ AppendNil(f(a));
+}
+
+lemma RightIdentity<T>(m: List)
+ ensures Bind(m, Return) == m
+{
+ match m
+ case Nil =>
+ assert Bind<T,T>(Nil, Return) == Nil;
+ case Cons(x, m') =>
+ calc {
+ Bind(Cons(x, m'), Return);
+ append(Return(x), Bind(m', Return));
+ Cons(x, Bind(m', Return));
+ }
+}
+
+lemma Associativity<T>(m: List, f: T -> List, g: T -> List)
+ requires Total(f) && Total(g)
+ ensures Bind(Bind(m, f), g) == Bind(m, x => Bind(f(x), g))
+{
+ match m
+ case Nil =>
+ assert Bind(m, x => Bind(f(x), g)) == Nil;
+ case Cons(x, xs) =>
+ match f(x)
+ case Nil =>
+ calc {
+ Bind(xs, y => Bind(f(y), g));
+ Bind(Cons(x, xs), y => Bind(f(y), g));
+ }
+ case Cons(y, ys) =>
+ calc {
+ append(g(y), Bind(append(ys, Bind(xs, f)), g));
+ { BindOverAppend(ys, Bind(xs, f), g); }
+ append(g(y), append(Bind(ys, g), Bind(Bind(xs, f), g)));
+ { AppendAssoc(g(y), Bind(ys, g), Bind(Bind(xs, f), g)); }
+ append(append(g(y), Bind(ys, g)), Bind(Bind(xs, f), g));
+ Bind(Cons(x, xs), z => Bind(f(z), g));
+ }
+}
+
+lemma BindOverAppend<T>(xs: List, ys: List, g: T -> List)
+ requires Total(g)
+ ensures Bind(append(xs, ys), g) == append(Bind(xs, g), Bind(ys, g))
+{
+ match xs
+ case Nil =>
+ case Cons(x, xs') =>
+ AppendAssoc(g(x), Bind(xs', g), Bind(ys, g));
+}
diff --git a/Test/dafny4/MonadicLaws.dfy.expect b/Test/dafny4/MonadicLaws.dfy.expect
new file mode 100644
index 00000000..d903c7c5
--- /dev/null
+++ b/Test/dafny4/MonadicLaws.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 16 verified, 0 errors
diff --git a/Test/dafny4/NipkowKlein-chapter3.dfy b/Test/dafny4/NipkowKlein-chapter3.dfy
index 6572359a..3de6a5fc 100644
--- a/Test/dafny4/NipkowKlein-chapter3.dfy
+++ b/Test/dafny4/NipkowKlein-chapter3.dfy
@@ -18,7 +18,7 @@ function append(xs: List, ys: List): List
// ----- arithmetic expressions -----
type vname = string // variable names
-datatype aexp = N(n: int) | V(x: vname) | Plus(0: aexp, 1: aexp) // arithmetic expressions
+datatype aexp = N(n: int) | V(vname) | Plus(aexp, aexp) // arithmetic expressions
type val = int
type state = vname -> val
@@ -131,9 +131,15 @@ lemma AsimpCorrect(a: aexp, s: state)
forall a' | a' < a { AsimpCorrect(a', s); }
}
+// The following lemma is not in the Nipkow and Klein book, but it's a fun one to prove.
+lemma ASimplInvolutive(a: aexp)
+ ensures asimp(asimp(a)) == asimp(a)
+{
+}
+
// ----- boolean expressions -----
-datatype bexp = Bc(v: bool) | Not(op: bexp) | And(0: bexp, 1: bexp) | Less(a0: aexp, a1: aexp)
+datatype bexp = Bc(v: bool) | Not(bexp) | And(bexp, bexp) | Less(aexp, aexp)
function bval(b: bexp, s: state): bool
reads s.reads
@@ -189,9 +195,12 @@ lemma BsimpCorrect(b: bexp, s: state)
ensures bval(bsimp(b), s) == bval(b, s)
{
 /* Here is one proof, which uses the induction hypothesis on anything smaller than b and also invokes
- the lemma AsimpCorrect on anything smaller than b.
+ the lemma AsimpCorrect on every arithmetic expression.
forall b' | b' < b { BsimpCorrect(b', s); }
- forall a' | a' < b { AsimpCorrect(a', s); }
+ forall a { AsimpCorrect(a, s); }
+ Yet another possibility is to mark the lemma with {:induction b} and to use the following line in
+ the body:
+ forall a { AsimpCorrect(a, s); }
*/
// Here is another proof, which makes explicit the uses of the induction hypothesis and the other lemma.
match b
diff --git a/Test/dafny4/NipkowKlein-chapter3.dfy.expect b/Test/dafny4/NipkowKlein-chapter3.dfy.expect
index ab18d98e..bb45fee9 100644
--- a/Test/dafny4/NipkowKlein-chapter3.dfy.expect
+++ b/Test/dafny4/NipkowKlein-chapter3.dfy.expect
@@ -1,2 +1,2 @@
-Dafny program verifier finished with 28 verified, 0 errors
+Dafny program verifier finished with 30 verified, 0 errors
diff --git a/Test/dafny4/NipkowKlein-chapter7.dfy b/Test/dafny4/NipkowKlein-chapter7.dfy
index 33be9dd6..0c089895 100644
--- a/Test/dafny4/NipkowKlein-chapter7.dfy
+++ b/Test/dafny4/NipkowKlein-chapter7.dfy
@@ -1,4 +1,4 @@
-// RUN: %dafny /compile:0 /rprint:"%t.rprint" "%s" > "%t"
+// RUN: %dafny /compile:0 /rprint:"%t.rprint" /autoTriggers:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// This file is a Dafny encoding of chapter 7 from "Concrete Semantics: With Isabelle/HOL" by
@@ -10,25 +10,19 @@ datatype List<T> = Nil | Cons(head: T, tail: List<T>)
type vname = string // variable names
type val = int
-type state = imap<vname, val>
-predicate Total(s: state)
-{
- forall x :: x in s
-}
+type state = map<vname, val>
datatype aexp = N(n: int) | V(x: vname) | Plus(0: aexp, 1: aexp) // arithmetic expressions
function aval(a: aexp, s: state): val
- requires Total(s)
{
match a
case N(n) => n
- case V(x) => s[x]
+ case V(x) => if x in s then s[x] else 0
case Plus(a0, a1) => aval(a0,s ) + aval(a1, s)
}
datatype bexp = Bc(v: bool) | Not(op: bexp) | And(0: bexp, 1: bexp) | Less(a0: aexp, a1: aexp)
function bval(b: bexp, s: state): bool
- requires Total(s)
{
match b
case Bc(v) => v
@@ -44,7 +38,6 @@ datatype com = SKIP | Assign(vname, aexp) | Seq(com, com) | If(bexp, com, com) |
// ----- Big-step semantics -----
inductive predicate big_step(c: com, s: state, t: state)
- requires Total(s)
{
match c
case SKIP =>
@@ -53,7 +46,6 @@ inductive predicate big_step(c: com, s: state, t: state)
t == s[x := aval(a, s)]
case Seq(c0, c1) =>
exists s' ::
- Total(s') &&
big_step(c0, s, s') &&
big_step(c1, s', t)
case If(b, thn, els) =>
@@ -61,13 +53,11 @@ inductive predicate big_step(c: com, s: state, t: state)
case While(b, body) =>
(!bval(b, s) && s == t) ||
(bval(b, s) && exists s' ::
- Total(s') &&
big_step(body, s, s') &&
big_step(While(b, body), s', t))
}
lemma Example1(s: state, t: state)
- requires Total(s)
requires t == s["x" := 5]["y" := 5]
ensures big_step(Seq(Assign("x", N(5)), Assign("y", V("x"))), s, t)
{
@@ -83,29 +73,13 @@ lemma Example1(s: state, t: state)
}
lemma SemiAssociativity(c0: com, c1: com, c2: com, s: state, t: state)
- requires Total(s)
ensures big_step(Seq(Seq(c0, c1), c2), s, t) == big_step(Seq(c0, Seq(c1, c2)), s, t)
{
- calc {
- big_step(Seq(Seq(c0, c1), c2), s, t);
- // def. big_step
- exists s'' :: Total(s'') && big_step(Seq(c0, c1), s, s'') && big_step(c2, s'', t);
- // def. big_step
- exists s'' :: Total(s'') && (exists s' :: Total(s') && big_step(c0, s, s') && big_step(c1, s', s'')) && big_step(c2, s'', t);
- // logic
- exists s', s'' :: Total(s') && Total(s'') && big_step(c0, s, s') && big_step(c1, s', s'') && big_step(c2, s'', t);
- // logic
- exists s' :: Total(s') && big_step(c0, s, s') && exists s'' :: Total(s'') && big_step(c1, s', s'') && big_step(c2, s'', t);
- // def. big_step
- exists s' :: Total(s') && big_step(c0, s, s') && big_step(Seq(c1, c2), s', t);
- // def. big_step
- big_step(Seq(c0, Seq(c1, c2)), s, t);
- }
}
predicate equiv_c(c: com, c': com)
{
- forall s,t :: Total(s) ==> big_step(c, s, t) == big_step(c', s, t)
+ forall s,t :: big_step(c, s, t) == big_step(c', s, t)
}
lemma lemma_7_3(b: bexp, c: com)
@@ -122,7 +96,7 @@ lemma lemma_7_5(b: bexp, c: com, c': com)
requires equiv_c(c, c')
ensures equiv_c(While(b, c), While(b, c'))
{
- forall s,t | Total(s)
+ forall s,t
ensures big_step(While(b, c), s, t) == big_step(While(b, c'), s, t)
{
if big_step(While(b, c), s, t) {
@@ -135,15 +109,9 @@ lemma lemma_7_5(b: bexp, c: com, c': com)
}
inductive lemma lemma_7_6(b: bexp, c: com, c': com, s: state, t: state)
- requires Total(s) && big_step(While(b, c), s, t) && equiv_c(c, c')
+ requires big_step(While(b, c), s, t) && equiv_c(c, c')
ensures big_step(While(b, c'), s, t)
{
- if !bval(b, s) {
- // trivial
- } else {
- var s' :| Total(s') && big_step#[_k-1](c, s, s') && big_step#[_k-1](While(b, c), s', t);
- lemma_7_6(b, c, c', s', t); // induction hypothesis
- }
}
// equiv_c is an equivalence relation
@@ -161,36 +129,15 @@ lemma equiv_c_transitive(c: com, c': com, c'': com)
}
inductive lemma IMP_is_deterministic(c: com, s: state, t: state, t': state)
- requires Total(s) && big_step(c, s, t) && big_step(c, s, t')
+ requires big_step(c, s, t) && big_step(c, s, t')
ensures t == t'
{
- match c
- case SKIP =>
- // trivial
- case Assign(x, a) =>
- // trivial
- case Seq(c0, c1) =>
- var s' :| Total(s') && big_step#[_k-1](c0, s, s') && big_step#[_k-1](c1, s', t);
- var s'' :| Total(s'') && big_step#[_k-1](c0, s, s'') && big_step#[_k-1](c1, s'', t');
- IMP_is_deterministic(c0, s, s', s'');
- IMP_is_deterministic(c1, s', t, t');
- case If(b, thn, els) =>
- IMP_is_deterministic(if bval(b, s) then thn else els, s, t, t');
- case While(b, body) =>
- if !bval(b, s) {
- // trivial
- } else {
- var s' :| Total(s') && big_step#[_k-1](body, s, s') && big_step#[_k-1](While(b, body), s', t);
- var s'' :| Total(s'') && big_step#[_k-1](body, s, s'') && big_step#[_k-1](While(b, body), s'', t');
- IMP_is_deterministic(body, s, s', s'');
- IMP_is_deterministic(While(b, body), s', t, t');
- }
+ // Dafny totally rocks!
}
// ----- Small-step semantics -----
inductive predicate small_step(c: com, s: state, c': com, s': state)
- requires Total(s)
{
match c
case SKIP => false
@@ -206,7 +153,6 @@ inductive predicate small_step(c: com, s: state, c': com, s': state)
}
inductive lemma SmallStep_is_deterministic(cs: (com, state), cs': (com, state), cs'': (com, state))
- requires Total(cs.1)
requires small_step(cs.0, cs.1, cs'.0, cs'.1)
requires small_step(cs.0, cs.1, cs''.0, cs''.1)
ensures cs' == cs''
@@ -216,100 +162,59 @@ inductive lemma SmallStep_is_deterministic(cs: (com, state), cs': (com, state),
case Seq(c0, c1) =>
if c0 == SKIP {
} else {
- var c0' :| cs'.0 == Seq(c0', c1) && small_step#[_k-1](c0, cs.1, c0', cs'.1);
- var c0'' :| cs''.0 == Seq(c0'', c1) && small_step#[_k-1](c0, cs.1, c0'', cs''.1);
+ var c0' :| cs'.0 == Seq(c0', c1) && small_step(c0, cs.1, c0', cs'.1);
+ var c0'' :| cs''.0 == Seq(c0'', c1) && small_step(c0, cs.1, c0'', cs''.1);
SmallStep_is_deterministic((c0, cs.1), (c0', cs'.1), (c0'', cs''.1));
}
case If(b, thn, els) =>
case While(b, body) =>
}
-inductive lemma small_step_ends_in_Total_state(c: com, s: state, c': com, s': state)
- requires Total(s) && small_step(c, s, c', s')
- ensures Total(s')
-{
- match c
- case Assign(x, a) =>
- case Seq(c0, c1) =>
- if c0 != SKIP {
- var c0' :| c' == Seq(c0', c1) && small_step(c0, s, c0', s');
- small_step_ends_in_Total_state(c0, s, c0', s');
- }
- case If(b, thn, els) =>
- case While(b, body) =>
-}
-
inductive predicate small_step_star(c: com, s: state, c': com, s': state)
- requires Total(s)
{
(c == c' && s == s') ||
exists c'', s'' ::
- small_step(c, s, c'', s'') &&
- (small_step_ends_in_Total_state(c, s, c'', s''); small_step_star(c'', s'', c', s'))
-}
-
-inductive lemma small_step_star_ends_in_Total_state(c: com, s: state, c': com, s': state)
- requires Total(s) && small_step_star(c, s, c', s')
- ensures Total(s')
-{
- if c == c' && s == s' {
- } else {
- var c'', s'' :| small_step(c, s, c'', s'') &&
- (small_step_ends_in_Total_state(c, s, c'', s''); small_step_star#[_k-1](c'', s'', c', s'));
- small_step_star_ends_in_Total_state(c'', s'', c', s');
- }
+ small_step(c, s, c'', s'') && small_step_star(c'', s'', c', s')
}
lemma star_transitive(c0: com, s0: state, c1: com, s1: state, c2: com, s2: state)
- requires Total(s0) && Total(s1)
requires small_step_star(c0, s0, c1, s1) && small_step_star(c1, s1, c2, s2)
ensures small_step_star(c0, s0, c2, s2)
{
star_transitive_aux(c0, s0, c1, s1, c2, s2);
}
inductive lemma star_transitive_aux(c0: com, s0: state, c1: com, s1: state, c2: com, s2: state)
- requires Total(s0) && Total(s1)
requires small_step_star(c0, s0, c1, s1)
ensures small_step_star(c1, s1, c2, s2) ==> small_step_star(c0, s0, c2, s2)
{
- if c0 == c1 && s0 == s1 {
- } else {
- var c', s' :|
- small_step(c0, s0, c', s') &&
- (small_step_ends_in_Total_state(c0, s0, c', s'); small_step_star#[_k-1](c', s', c1, s1));
- star_transitive_aux(c', s', c1, s1, c2, s2);
- }
}
// The big-step semantics can be simulated by some number of small steps
inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
- requires Total(s) && big_step(c, s, t)
+ requires big_step(c, s, t)
ensures small_step_star(c, s, SKIP, t)
{
match c
case SKIP =>
// trivial
case Assign(x, a) =>
- assert t == s[x := aval(a, s)];
- assert small_step(c, s, SKIP, t);
assert small_step_star(SKIP, t, SKIP, t);
case Seq(c0, c1) =>
- var s' :| Total(s') && big_step#[_k-1](c0, s, s') && big_step#[_k-1](c1, s', t);
+ var s' :| big_step(c0, s, s') && big_step(c1, s', t);
calc <== {
small_step_star(c, s, SKIP, t);
{ star_transitive(Seq(c0, c1), s, Seq(SKIP, c1), s', SKIP, t); }
small_step_star(Seq(c0, c1), s, Seq(SKIP, c1), s') && small_step_star(Seq(SKIP, c1), s', SKIP, t);
{ lemma_7_13(c0, s, SKIP, s', c1); }
small_step_star(c0, s, SKIP, s') && small_step_star(Seq(SKIP, c1), s', SKIP, t);
- { BigStep_implies_SmallStepStar(c0, s, s'); }
+ //{ BigStep_implies_SmallStepStar(c0, s, s'); }
small_step_star(Seq(SKIP, c1), s', SKIP, t);
{ assert small_step(Seq(SKIP, c1), s', c1, s'); }
small_step_star(c1, s', SKIP, t);
- { BigStep_implies_SmallStepStar(c1, s', t); }
+ //{ BigStep_implies_SmallStepStar(c1, s', t); }
true;
}
case If(b, thn, els) =>
- BigStep_implies_SmallStepStar(if bval(b, s) then thn else els, s, t);
case While(b, body) =>
if !bval(b, s) && s == t {
calc <== {
@@ -321,7 +226,7 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
true;
}
} else {
- var s' :| Total(s') && big_step#[_k-1](body, s, s') && big_step#[_k-1](While(b, body), s', t);
+ var s' :| big_step(body, s, s') && big_step(While(b, body), s', t);
calc <== {
small_step_star(c, s, SKIP, t);
{ assert small_step(c, s, If(b, Seq(body, While(b, body)), SKIP), s); }
@@ -332,41 +237,40 @@ inductive lemma BigStep_implies_SmallStepStar(c: com, s: state, t: state)
small_step_star(Seq(body, While(b, body)), s, Seq(SKIP, While(b, body)), s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
{ lemma_7_13(body, s, SKIP, s', While(b, body)); }
small_step_star(body, s, SKIP, s') && small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
- { BigStep_implies_SmallStepStar(body, s, s'); }
+ //{ BigStep_implies_SmallStepStar(body, s, s'); }
small_step_star(Seq(SKIP, While(b, body)), s', SKIP, t);
{ assert small_step(Seq(SKIP, While(b, body)), s', While(b, body), s'); }
small_step_star(While(b, body), s', SKIP, t);
- { BigStep_implies_SmallStepStar(While(b, body), s', t); }
+ //{ BigStep_implies_SmallStepStar(While(b, body), s', t); }
true;
}
}
}
inductive lemma lemma_7_13(c0: com, s0: state, c: com, t: state, c1: com)
- requires Total(s0) && small_step_star(c0, s0, c, t)
+ requires small_step_star(c0, s0, c, t)
ensures small_step_star(Seq(c0, c1), s0, Seq(c, c1), t)
{
if c0 == c && s0 == t {
} else {
- var c', s' :| small_step(c0, s0, c', s') && (small_step_ends_in_Total_state(c0, s0, c', s'); small_step_star#[_k-1](c', s', c, t));
+ var c', s' :| small_step(c0, s0, c', s') && small_step_star(c', s', c, t);
lemma_7_13(c', s', c, t, c1);
}
}
inductive lemma SmallStepStar_implies_BigStep(c: com, s: state, t: state)
- requires Total(s) && small_step_star(c, s, SKIP, t)
+ requires small_step_star(c, s, SKIP, t)
ensures big_step(c, s, t)
{
if c == SKIP && s == t {
} else {
- var c', s' :| small_step(c, s, c', s') && (small_step_ends_in_Total_state(c, s, c', s'); small_step_star#[_k-1](c', s', SKIP, t));
- SmallStepStar_implies_BigStep(c', s', t);
+ var c', s' :| small_step(c, s, c', s') && small_step_star(c', s', SKIP, t);
SmallStep_plus_BigStep(c, s, c', s', t);
}
}
inductive lemma SmallStep_plus_BigStep(c: com, s: state, c': com, s': state, t: state)
- requires Total(s) && Total(s') && small_step(c, s, c', s')
+ requires small_step(c, s, c', s')
ensures big_step(c', s', t) ==> big_step(c, s, t)
{
match c
@@ -376,9 +280,7 @@ inductive lemma SmallStep_plus_BigStep(c: com, s: state, c': com, s': state, t:
} else {
var c0' :| c' == Seq(c0', c1) && small_step(c0, s, c0', s');
if big_step(c', s', t) {
- var k: nat :| big_step#[k](Seq(c0', c1), s', t);
- var s'' :| Total(s'') && big_step(c0', s', s'') && big_step(c1, s'', t);
- SmallStep_plus_BigStep(c0, s, c0', s', s'');
+ var s'' :| big_step(c0', s', s'') && big_step(c1, s'', t);
}
}
case If(b, thn, els) =>
@@ -391,7 +293,6 @@ inductive lemma SmallStep_plus_BigStep(c: com, s: state, c': com, s': state, t:
// big-step and small-step semantics agree
lemma BigStep_SmallStepStar_Same(c: com, s: state, t: state)
- requires Total(s)
ensures big_step(c, s, t) <==> small_step_star(c, s, SKIP, t)
{
if big_step(c, s, t) {
@@ -403,14 +304,12 @@ lemma BigStep_SmallStepStar_Same(c: com, s: state, t: state)
}
predicate final(c: com, s: state)
- requires Total(s)
{
!exists c',s' :: small_step(c, s, c', s')
}
// lemma 7.17:
lemma final_is_skip(c: com, s: state)
- requires Total(s)
ensures final(c, s) <==> c == SKIP
{
if c == SKIP {
@@ -420,7 +319,7 @@ lemma final_is_skip(c: com, s: state)
}
}
lemma only_skip_has_no_next_state(c: com, s: state) returns (c': com, s': state)
- requires Total(s) && c != SKIP
+ requires c != SKIP
ensures small_step(c, s, c', s')
{
match c
@@ -441,15 +340,12 @@ lemma only_skip_has_no_next_state(c: com, s: state) returns (c': com, s': state)
}
lemma lemma_7_18(c: com, s: state)
- requires Total(s)
ensures (exists t :: big_step(c, s, t)) <==>
- (exists c',s' :: small_step_star(c, s, c', s') &&
- (small_step_star_ends_in_Total_state(c, s, c', s'); final(c', s')))
+ (exists c',s' :: small_step_star(c, s, c', s') && final(c', s'))
{
if exists t :: big_step(c, s, t) {
var t :| big_step(c, s, t);
BigStep_SmallStepStar_Same(c, s, t);
- small_step_star_ends_in_Total_state(c, s, SKIP, t);
calc ==> {
true;
big_step(c, s, t);
@@ -458,11 +354,11 @@ lemma lemma_7_18(c: com, s: state)
small_step_star(c, s, SKIP, t) && final(SKIP, t);
}
}
- if exists c',s' :: small_step_star(c, s, c', s') &&
- (small_step_star_ends_in_Total_state(c, s, c', s'); final(c', s')) {
- var c',s' :| small_step_star(c, s, c', s') &&
- (small_step_star_ends_in_Total_state(c, s, c', s'); final(c', s'));
+ if exists c',s' :: small_step_star(c, s, c', s') && final(c', s') {
+ var c',s' :| small_step_star(c, s, c', s') && final(c', s');
final_is_skip(c', s');
BigStep_SmallStepStar_Same(c, s, s');
}
}
+
+// Autotriggers:0 added as this file relies on proving a property of the form body(f) == f
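
The chapter-7 rewrite above leans on inductive predicates and inductive lemmas, the least-fixpoint counterparts of copredicates and colemmas: a recursive call inside an inductive lemma is compiled to a call of the corresponding prefix lemma at depth _k - 1. A minimal sketch of the pairing, with hypothetical names (Even, EvenIsDivisibleBy2) that are not in the test:

inductive predicate Even(n: nat)
{
  n == 0 || (n >= 2 && Even(n - 2))
}

inductive lemma EvenIsDivisibleBy2(n: nat)
  requires Even(n)
  ensures n % 2 == 0
{
  if n == 0 {
    // base case of the least fixpoint
  } else {
    // the recursive call consumes one level of the prefix predicate Even#[_k]
    EvenIsDivisibleBy2(n - 2);
  }
}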
diff --git a/Test/dafny4/NipkowKlein-chapter7.dfy.expect b/Test/dafny4/NipkowKlein-chapter7.dfy.expect
index e08b3632..90432af3 100644
--- a/Test/dafny4/NipkowKlein-chapter7.dfy.expect
+++ b/Test/dafny4/NipkowKlein-chapter7.dfy.expect
@@ -1,2 +1,2 @@
-Dafny program verifier finished with 54 verified, 0 errors
+Dafny program verifier finished with 49 verified, 0 errors
diff --git a/Test/dafny4/NumberRepresentations.dfy b/Test/dafny4/NumberRepresentations.dfy
index 1ebdf64c..c15f4987 100644
--- a/Test/dafny4/NumberRepresentations.dfy
+++ b/Test/dafny4/NumberRepresentations.dfy
@@ -1,4 +1,4 @@
-// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
+// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// We consider a number representation that consists of a sequence of digits. The least
@@ -234,17 +234,16 @@ lemma UniqueRepresentation(a: seq<int>, b: seq<int>, lowDigit: int, base: nat)
}
lemma ZeroIsUnique(a: seq<int>, lowDigit: int, base: nat)
- requires 2 <= base && lowDigit <= 0 < lowDigit + base;
- requires a == trim(a);
- requires IsSkewNumber(a, lowDigit, base);
- requires eval(a, base) == 0;
- ensures a == [];
+ requires 2 <= base && lowDigit <= 0 < lowDigit + base
+ requires a == trim(a)
+ requires IsSkewNumber(a, lowDigit, base)
+ requires eval(a, base) == 0
+ ensures a == []
{
if a != [] {
- assert eval(a, base) == a[0] + base * eval(a[1..], base);
if eval(a[1..], base) == 0 {
TrimProperty(a);
- ZeroIsUnique(a[1..], lowDigit, base);
+ // ZeroIsUnique(a[1..], lowDigit, base);
}
assert false;
}
@@ -293,3 +292,7 @@ lemma MulInverse(x: int, a: int, b: int, y: int)
ensures a == b;
{
}
+
+// Local Variables:
+// dafny-prover-local-args: ("/vcsMaxKeepGoingSplits:5")
+// End:
diff --git a/Test/dafny4/Primes.dfy b/Test/dafny4/Primes.dfy
index 31e3a19b..0c2a64dd 100644
--- a/Test/dafny4/Primes.dfy
+++ b/Test/dafny4/Primes.dfy
@@ -1,9 +1,9 @@
-// RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
+// RUN: %dafny /compile:0 /dprint:"%t.dprint" /autoTriggers:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
predicate IsPrime(n: int)
{
- 2 <= n && forall m :: 2 <= m < n ==> n % m != 0
+ 2 <= n && forall m {:nowarn} :: 2 <= m < n ==> n % m != 0 // WISH It would be great to think about the status of modulo as a trigger
}
// The following theorem shows that there is an infinite number of primes
@@ -110,6 +110,13 @@ lemma RemoveFactor(x: int, s: set<int>)
x * y * product(s - {y} - {x});
{ assert s - {y} - {x} == s - {x} - {y}; }
x * y * product(s - {x} - {y});
+ /* FIXME: This annotation wasn't needed before the introduction
+ * of auto-triggers. It's not needed if one adds {:no_trigger}
+ * to the forall y :: y in s ==> y <= x part of PickLargest, but that
+ * boils down to z3 picking $Box(...) as good trigger
+ */
+ // FIXME: the parens shouldn't be needed around (s - {x})
+ { assert y in (s - {x}); }
{ assert y == PickLargest(s - {x}); }
x * product(s - {x});
}
@@ -160,8 +167,8 @@ lemma Composite(c: int) returns (a: int, b: int)
calc {
true;
!IsPrime(c);
- !(2 <= c && forall m :: 2 <= m < c ==> c % m != 0);
- exists m :: 2 <= m < c && c % m == 0;
+ !(2 <= c && forall m {:nowarn} :: 2 <= m < c ==> c % m != 0);
+ exists m {:nowarn} :: 2 <= m < c && c % m == 0;
}
a :| 2 <= a < c && c % a == 0;
b := c / a;
@@ -187,7 +194,7 @@ lemma LargestElementExists(s: set<int>)
var s' := s;
while true
invariant s' != {} && s' <= s;
- invariant forall x,y :: x in s' && y in s - s' ==> y <= x;
+ invariant forall x,y {:nowarn} :: x in s' && y in s - s' ==> y <= x;
decreases s';
{
var x :| x in s'; // pick something
diff --git a/Test/dafny4/Regression0.dfy b/Test/dafny4/Regression0.dfy
new file mode 100644
index 00000000..666d9575
--- /dev/null
+++ b/Test/dafny4/Regression0.dfy
@@ -0,0 +1,13 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+// This once crashed Dafny
+
+method M() {
+ var s := [1, "2"]; // error: all elements must have the same type
+ if * {
+ assert exists n :: n in s && n != 1; // the type of n is inferred to be int
+ } else {
+ assert "2" in s; // error: since the type of s wasn't determined
+ }
+}
diff --git a/Test/dafny4/Regression0.dfy.expect b/Test/dafny4/Regression0.dfy.expect
new file mode 100644
index 00000000..566b3e3f
--- /dev/null
+++ b/Test/dafny4/Regression0.dfy.expect
@@ -0,0 +1,3 @@
+Regression0.dfy(7,15): Error: All elements of display must be of the same type (got string, but type of previous elements is int)
+Regression0.dfy(11,15): Error: second argument to "in" must be a set, multiset, or sequence with elements of type string, or a map with domain string (instead got ?)
+2 resolution/type errors detected in Regression0.dfy
diff --git a/Test/dafny4/Regression1.dfy b/Test/dafny4/Regression1.dfy
new file mode 100644
index 00000000..ebd8cf6d
--- /dev/null
+++ b/Test/dafny4/Regression1.dfy
@@ -0,0 +1,9 @@
+// RUN: %dafny /compile:0 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+
+ghost method M() {
+ var x :=
+// In the following line, why are the range and term copied in Substitute?
+// var loo := 100; map y | 0 <= y < 100 :: y+1;
+ var loo := 100; imap y: int | true :: 3;
+}
diff --git a/Test/dafny4/Regression1.dfy.expect b/Test/dafny4/Regression1.dfy.expect
new file mode 100644
index 00000000..069e7767
--- /dev/null
+++ b/Test/dafny4/Regression1.dfy.expect
@@ -0,0 +1,2 @@
+
+Dafny program verifier finished with 2 verified, 0 errors
diff --git a/Test/dafny4/SoftwareFoundations-Basics.dfy.expect b/Test/dafny4/SoftwareFoundations-Basics.dfy.expect
index 0f9eb8d0..f07b068f 100644
--- a/Test/dafny4/SoftwareFoundations-Basics.dfy.expect
+++ b/Test/dafny4/SoftwareFoundations-Basics.dfy.expect
@@ -1,4 +1,4 @@
-SoftwareFoundations-Basics.dfy(41,12): Error: assertion violation
+SoftwareFoundations-Basics.dfy(41,11): Error: assertion violation
Execution trace:
(0,0): anon0
diff --git a/Test/dafny4/UnionFind.dfy b/Test/dafny4/UnionFind.dfy
new file mode 100644
index 00000000..6c4b4cfb
--- /dev/null
+++ b/Test/dafny4/UnionFind.dfy
@@ -0,0 +1,332 @@
+// RUN: %dafny /rprint:"%t.rprint" /autoTriggers:1 "%s" > "%t"
+// RUN: %diff "%s.expect" "%t"
+// Rustan Leino, Nov 2015
+
+// Module M0 gives the high-level specification of the UnionFind data structure
+abstract module M0 {
+ class Element { }
+
+ class {:autocontracts} UnionFind {
+ ghost var M: map<Element, Element>
+ protected predicate Valid()
+ reads this, Repr
+
+ constructor ()
+ ensures M == map[]
+ {
+ Repr, M:= {this}, map[];
+ }
+
+ method New() returns (e: Element)
+ ensures old(e !in M && forall e' :: e' in M ==> M[e'] != e)
+ ensures M == old(M)[e := e]
+
+ method Find(e: Element) returns (r: Element)
+ requires e in M
+ ensures M == old(M) && M[e] == r
+
+ method Union(e0: Element, e1: Element) returns (ghost r: Element)
+ requires e0 in M && e1 in M
+ ensures r in old(M) && old(M[r] == M[e0] || M[r] == M[e1])
+ ensures M == map e | e in old(M) :: old(if M[e] == M[e0] || M[e] == M[e1] then r else M[e])
+ }
+}
+
+// Module M1 gives the concrete data structure used in UnionFind, along with the invariant of
+// that data structure. It also implements the New method and, by declaring a new method Join,
+// the Union method.
+abstract module M1 refines M0 {
+ datatype Contents = Root(depth: nat) | Link(next: Element)
+ class Element {
+ var c: Contents
+ }
+
+ type CMap = map<Element, Contents>
+ predicate GoodCMap(C: CMap)
+ {
+ null !in C && forall f :: f in C && C[f].Link? ==> C[f].next in C
+ }
+
+ class UnionFind {
+ protected predicate Valid...
+ {
+ (forall e :: e in M ==> e in Repr && M[e] in M && M[M[e]] == M[e]) &&
+ (forall e :: e in M && e.c.Link? ==> e.c.next in M) &&
+ (forall e :: e in M ==> M[e].c.Root? && Reaches(M[e].c.depth, e, M[e], Collect()))
+ }
+
+ // This function returns a snapshot of the .c fields of the objects in the domain of M
+ function Collect(): CMap
+ requires null !in M && forall f :: f in M && f.c.Link? ==> f.c.next in M
+ reads this, set a | a in M
+ ensures GoodCMap(Collect())
+ {
+ map e | e in M :: e.c
+ }
+ predicate Reaches(d: nat, e: Element, r: Element, C: CMap)
+ requires GoodCMap(C)
+ requires e in C
+ {
+ match C[e]
+ case Root(_) => e == r
+ case Link(next) => d != 0 && Reaches(d-1, next, r, C)
+ }
+ lemma {:autocontracts false} Reaches_Monotonic(d: nat, e: Element, r: Element, C: CMap, C': CMap)
+ requires GoodCMap(C) && GoodCMap(C')
+ requires e in C
+ requires Reaches(d, e, r, C) && forall f :: f in C ==> f in C' && C[f] == C'[f]
+ ensures Reaches(d, e, r, C')
+ {
+ }
+
+ method New...
+ {
+ e := new Element;
+ e.c := Root(0);
+ Repr := Repr + {e};
+ M := M[e := e];
+ assert Reaches(0, e, e, Collect());
+ forall f | f in M
+ ensures M[f].c.Root? && Reaches(M[f].c.depth, f, M[f], Collect())
+ {
+ if f != e {
+ Reaches_Monotonic(M[f].c.depth, f, M[f], old(Collect()), Collect());
+ }
+ }
+ }
+
+ method Union...
+ {
+ var r0 := Find(e0);
+ var r1 := Find(e1);
+ r := Join(r0, r1);
+ }
+
+ method Join(r0: Element, r1: Element) returns (ghost r: Element)
+ requires r0 in M && r1 in M && M[r0] == r0 && M[r1] == r1
+ ensures r == r0 || r == r1
+ ensures M == map e | e in old(M) :: old(if M[e] == r0 || M[e] == r1 then r else M[e])
+ }
+
+ // stupid help lemma to get around boxing
+ lemma MapsEqual<D>(m: map<D, D>, n: map<D, D>)
+ requires forall d :: d in m <==> d in n;
+ requires forall d :: d in m ==> m[d] == n[d]
+ ensures m == n
+ {
+ }
+}
+
+// Module M2 adds the implementation of Find, together with the proofs needed for the verification.
+abstract module M2 refines M1 {
+ class UnionFind {
+ method Find...
+ {
+ r := FindAux(M[e].c.depth, e);
+ }
+
+ lemma NextReachesSame(e: Element)
+ requires e in M && e.c.Link?
+ ensures M[e] == M[e.c.next]
+ {
+ var next := e.c.next;
+ var d0, d1 := M[e].c.depth, M[next].c.depth;
+ assert Reaches(d0 - 1, next, M[e], Collect());
+ assert Reaches(d1, next, M[next], Collect());
+ Reaches_SinkIsFunctionOfStart(d0 - 1, d1, next, M[e], M[next], Collect());
+ }
+
+ lemma {:autocontracts false} Reaches_SinkIsFunctionOfStart(d0: nat, d1: nat, e: Element, r0: Element, r1: Element, C: CMap)
+ requires GoodCMap(C)
+ requires e in C
+ requires Reaches(d0, e, r0, C) && Reaches(d1, e, r1, C)
+ ensures r0 == r1
+ {
+ }
+
+ method FindAux(ghost d: nat, e: Element) returns (r: Element)
+ requires e in M && Reaches(d, e, M[e], Collect())
+ ensures M == old(M) && M[e] == r
+ ensures forall d: nat, e, r :: e in old(Collect()) && Reaches(d, e, r, old(Collect())) ==> Reaches(d, e, r, Collect())
+ {
+ match e.c
+ case Root(_) =>
+ r := e;
+ case Link(next) =>
+ NextReachesSame(e);
+ r := FindAux(d-1, next);
+ ghost var C := Collect(); // take a snapshot of all the .c fields
+ e.c := Link(r);
+ UpdateMaintainsReaches(d, e, r, C, Collect());
+ }
+
+ lemma {:autocontracts false} UpdateMaintainsReaches(td: nat, tt: Element, tm: Element, C: CMap, C': CMap)
+ requires GoodCMap(C)
+ requires tt in C && Reaches(td, tt, tm, C)
+ requires C' == C[tt := Link(tm)] && C'[tt].Link? && tm in C' && C'[tm].Root?
+ requires null !in C' && forall f :: f in C && C'[f].Link? ==> C'[f].next in C
+ ensures forall d: nat, e, r :: e in C && Reaches(d, e, r, C) ==> Reaches(d, e, r, C')
+ {
+ forall d: nat, e, r | e in C && Reaches(d, e, r, C)
+ ensures Reaches(d, e, r, C')
+ {
+ ConstructReach(d, e, r, C, td, tt, tm, C');
+ }
+ }
+
+ lemma {:autocontracts false} ConstructReach(d: nat, e: Element, r: Element, C: CMap, td: nat, tt: Element, tm: Element, C': CMap)
+ requires GoodCMap(C)
+ requires e in C
+ requires Reaches(d, e, r, C)
+ requires tt in C && Reaches(td, tt, tm, C);
+ requires C' == C[tt := Link(tm)] && C'[tt].Link? && tm in C' && C'[tm].Root?
+ requires GoodCMap(C')
+ ensures Reaches(d, e, r, C')
+ {
+ if e == tt {
+ Reaches_SinkIsFunctionOfStart(d, td, e, r, tm, C);
+ } else {
+ match C[e]
+ case Root(_) =>
+ case Link(next) =>
+ ConstructReach(d-1, next, r, C, td, tt, tm, C');
+ }
+ }
+ }
+}
+
+// Finally, module M3 adds the implementation of Join, along with what's required to
+// verify its correctness.
+module M3 refines M2 {
+ class UnionFind {
+ method Join...
+ {
+ if r0 == r1 {
+ r := r0;
+ MapsEqual(M, map e | e in M :: if M[e] == r0 || M[e] == r1 then r else M[e]);
+ return;
+ } else if r0.c.depth < r1.c.depth {
+ r0.c := Link(r1);
+ r := r1;
+ M := map e | e in M :: if M[e] == r0 || M[e] == r1 then r else M[e];
+ forall e | e in Collect()
+ ensures M[e].c.Root? && Reaches(M[e].c.depth, e, M[e], Collect())
+ {
+ JoinMaintainsReaches0(r0, r1, old(Collect()), Collect());
+ assert Reaches(old(M[e].c).depth, e, old(M)[e], old(Collect()));
+ }
+ } else if r1.c.depth < r0.c.depth {
+ r1.c := Link(r0);
+ r := r0;
+ M := map e | e in M :: if M[e] == r0 || M[e] == r1 then r else M[e];
+ forall e | e in Collect()
+ ensures M[e].c.Root? && Reaches(M[e].c.depth, e, M[e], Collect())
+ {
+ JoinMaintainsReaches0(r1, r0, old(Collect()), Collect());
+ assert Reaches(old(M[e].c).depth, e, old(M)[e], old(Collect()));
+ }
+ } else {
+ r0.c := Link(r1);
+ r1.c := Root(r1.c.depth + 1);
+ r := r1;
+ M := map e | e in M :: if M[e] == r0 || M[e] == r1 then r else M[e];
+ forall e | e in Collect()
+ ensures M[e].c.Root? && Reaches(M[e].c.depth, e, M[e], Collect())
+ {
+ JoinMaintainsReaches1(r0, r1, old(Collect()), Collect());
+ assert Reaches(old(M[e].c).depth, e, old(M)[e], old(Collect()));
+ }
+ }
+ }
+
+ lemma {:autocontracts false} JoinMaintainsReaches1(r0: Element, r1: Element, C: CMap, C': CMap)
+ requires GoodCMap(C)
+ requires r0 in C && r1 in C && C[r0].Root? && C[r1].Root? && C[r0].depth == C[r1].depth && r0 != r1
+ requires C' == C[r0 := Link(r1)][r1 := Root(C[r1].depth + 1)]
+ requires GoodCMap(C')
+ ensures forall d: nat, e, r :: e in C && Reaches(d, e, r, C) && r != r0 && r != r1 ==> Reaches(d, e, r, C') // proved automatically by induction
+ ensures forall e :: e in C && Reaches(C[r0].depth, e, r0, C) ==> Reaches(C'[r1].depth, e, r1, C')
+ ensures forall e :: e in C && Reaches(C[r1].depth, e, r1, C) ==> Reaches(C'[r1].depth, e, r1, C')
+ {
+ forall e | e in C && Reaches(C[r0].depth, e, r0, C)
+ ensures Reaches(C'[r1].depth, e, r1, C')
+ {
+ ExtendedReach'(e, C, C[r0].depth, C'[r1].depth, r0, r1, C');
+ }
+ forall e | e in C && Reaches(C[r1].depth, e, r1, C)
+ ensures Reaches(C'[r1].depth, e, r1, C')
+ {
+ ReachUnaffectedByChangeFromRoot'(C[r1].depth, e, r1, C, C[r0].depth, r0, r1, C');
+ }
+ }
+
+ lemma {:autocontracts false} ReachUnaffectedByChangeFromRoot'(d: nat, e: Element, r: Element, C: CMap, td: nat, r0: Element, r1: Element, C': CMap)
+ requires GoodCMap(C)
+ requires e in C && Reaches(d, e, r, C)
+ requires r0 in C && r1 in C && C[r0].Root? && C[r1].Root? && C[r0].depth == C[r1].depth && r0 != r1
+ requires C[r0].Root? && C' == C[r0 := Link(r1)][r1 := Root(C[r1].depth + 1)]
+ requires Reaches(td, r0, r0, C) && r0 != r
+ requires GoodCMap(C')
+ ensures Reaches(d+1, e, r, C')
+ {
+ }
+
+ lemma {:autocontracts false} ExtendedReach'(e: Element, C: CMap, d0: nat, d1: nat, r0: Element, r1: Element, C': CMap)
+ requires GoodCMap(C) && GoodCMap(C')
+ requires r0 in C && r1 in C && C[r0].Root? && C[r1].Root? && C[r0].depth == C[r1].depth && r0 != r1
+ requires C' == C[r0 := Link(r1)][r1 := Root(C[r1].depth + 1)]
+ requires C[r0].Root? && d0 <= C[r0].depth && C[r1].Root? && d1 <= C'[r1].depth && d0 < d1
+ requires e in C && Reaches(d0, e, r0, C)
+ ensures Reaches(d1, e, r1, C')
+ {
+ match C[e]
+ case Root(_) =>
+ case Link(next) =>
+ ExtendedReach'(next, C, d0-1, d1-1, r0, r1, C');
+ }
+
+ lemma {:autocontracts false} JoinMaintainsReaches0(r0: Element, r1: Element, C: CMap, C': CMap)
+ requires GoodCMap(C)
+ requires r0 in C && r1 in C && C[r0].Root? && C[r1].Root? && C[r0].depth < C[r1].depth
+ requires C' == C[r0 := Link(r1)]
+ requires GoodCMap(C')
+ ensures forall d: nat, e, r :: e in C && Reaches(d, e, r, C) && r != r0 ==> Reaches(d, e, r, C')
+ ensures forall e :: e in C && Reaches(C[r0].depth, e, r0, C) ==> Reaches(C[r1].depth, e, r1, C')
+ {
+ forall d: nat, e, r | e in C && Reaches(d, e, r, C) && r != r0
+ ensures Reaches(d, e, r, C')
+ {
+ ReachUnaffectedByChangeFromRoot(d, e, r, C, C[r0].depth, r0, r1, C');
+ }
+ forall e | e in C && Reaches(C[r0].depth, e, r0, C)
+ ensures Reaches(C[r1].depth, e, r1, C')
+ {
+ ExtendedReach(e, C, C[r0].depth, C[r1].depth, r0, r1, C');
+ }
+ }
+
+ lemma {:autocontracts false} ReachUnaffectedByChangeFromRoot(d: nat, e: Element, r: Element, C: CMap, td: nat, tt: Element, tm: Element, C': CMap)
+ requires GoodCMap(C)
+ requires e in C && Reaches(d, e, r, C)
+ requires tt in C && Reaches(td, tt, tt, C) && tt != r
+ requires C[tt].Root? && C' == C[tt := Link(tm)]
+ requires GoodCMap(C')
+ ensures Reaches(d, e, r, C')
+ {
+ }
+
+ lemma {:autocontracts false} ExtendedReach(e: Element, C: CMap, d0: nat, d1: nat, r0: Element, r1: Element, C': CMap)
+ requires GoodCMap(C) && GoodCMap(C')
+ requires r0 in C && r1 in C && r0 != r1
+ requires C' == C[r0 := Link(r1)]
+ requires C[r0].Root? && d0 <= C[r0].depth && C[r1].Root? && d1 <= C[r1].depth && d0 < d1
+ requires e in C && Reaches(d0, e, r0, C)
+ ensures Reaches(d1, e, r1, C')
+ {
+ match C[e]
+ case Root(_) =>
+ case Link(next) =>
+ ExtendedReach(next, C, d0-1, d1-1, r0, r1, C');
+ }
+ }
+}
diff --git a/Test/dafny4/UnionFind.dfy.expect b/Test/dafny4/UnionFind.dfy.expect
new file mode 100644
index 00000000..8c2a9667
--- /dev/null
+++ b/Test/dafny4/UnionFind.dfy.expect
@@ -0,0 +1,3 @@
+
+Dafny program verifier finished with 89 verified, 0 errors
+Compiled assembly into UnionFind.dll
diff --git a/Test/dafny4/set-compr.dfy b/Test/dafny4/set-compr.dfy
index 71a07f3d..d093a924 100644
--- a/Test/dafny4/set-compr.dfy
+++ b/Test/dafny4/set-compr.dfy
@@ -22,7 +22,7 @@ method O() returns (ghost p: set<object>)
method P() returns (p: set<object>)
{
- p := set o: object | true; // not allowed -- not in a ghost context
+ p := set o: object | true; // error: not (easily) compilable
}
ghost method Q() returns (p: set<object>)
@@ -30,26 +30,54 @@ ghost method Q() returns (p: set<object>)
p := set o: object | true; // allowed, since the whole method is ghost
}
-function F(): int
+function F(p: object): int
+ requires p in set o: object | true // error: function is not allowed to depend on allocation state
+ ensures p in set o: object | true // error: ditto (although one could argue that this would be okay)
+ reads set o: object | true // error: same as for 'requires'
+ decreases set o: object | true // error: same as for 'ensures'
+{
+ if p in set o: object | true then // error: function is not allowed to depend on allocation state
+ F(p)
+ else
+ 0
+}
+
+function method G(p: object): int
+ requires p in set o: object | true // error (see F)
+ ensures p in set o: object | true // error (see F)
+ reads set o: object | true // error (see F)
+ decreases set o: object | true // error (see F)
+{
+ if p in set o: object | true then // error (see F)
+ G(p)
+ else
+ 0
+}
+
+method M0() returns (ghost r: int, s: int)
requires null in set o: object | true // allowed
ensures null in set o: object | true // allowed
- reads set o: object | true // allowed
+ modifies set o: object | true // allowed
decreases set o: object | true // allowed
{
- if null in set o: object | true then // allowed -- in a ghost context
- F()
- else
- 0
+ if null in set o: object | true { // this makes the "if" a ghost
+ r := G(null);
+ s := G(null); // error: assignment of non-ghost not allowed inside ghost "if"
+ } else {
+ r := 0;
+ }
}
-function method G(): int
+method M1() returns (ghost r: int, s: int)
requires null in set o: object | true // (X) allowed
ensures null in set o: object | true // (X) allowed
- reads set o: object | true // allowed
+ modifies set o: object | true // allowed
decreases set o: object | true // (X) allowed
{
- if null in set o: object | true then // not allowed, since this is not a ghost context
- G()
- else
- 0
+ if null in set o: object | true { // this makes the "if" a ghost
+ r := G(null);
+ s := G(null); // error: assignment of non-ghost not allowed inside ghost "if"
+ } else {
+ r := 0;
+ }
}
diff --git a/Test/dafny4/set-compr.dfy.expect b/Test/dafny4/set-compr.dfy.expect
index b31c6ac0..b0490a11 100644
--- a/Test/dafny4/set-compr.dfy.expect
+++ b/Test/dafny4/set-compr.dfy.expect
@@ -1,3 +1,14 @@
-set-compr.dfy(25,7): Error: a set comprehension must produce a finite set, but Dafny's heuristics can't figure out how to produce a bounded set of values for 'o'
-set-compr.dfy(51,13): Error: a set comprehension must produce a finite set, but Dafny's heuristics can't figure out how to produce a bounded set of values for 'o'
-2 resolution/type errors detected in set-compr.dfy
+set-compr.dfy(25,7): Error: set comprehensions in non-ghost contexts must be compilable, but Dafny's heuristics can't figure out how to produce or compile a bounded set of values for 'o'
+set-compr.dfy(34,16): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(35,15): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(36,8): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(37,12): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(39,10): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(46,16): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(47,15): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(48,8): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(49,12): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(51,10): Error: a set comprehension involved in a function definition is not allowed to depend on the set of allocated references; Dafny's heuristics can't figure out a bound for the values of 'o'
+set-compr.dfy(65,6): Error: Assignment to non-ghost variable is not allowed in this context (because this is a ghost method or because the statement is guarded by a specification-only expression)
+set-compr.dfy(79,6): Error: Assignment to non-ghost variable is not allowed in this context (because this is a ghost method or because the statement is guarded by a specification-only expression)
+13 resolution/type errors detected in set-compr.dfy