author    Rustan Leino <leino@microsoft.com>    2011-11-15 13:03:52 -0800
committer Rustan Leino <leino@microsoft.com>    2011-11-15 13:03:52 -0800
commit    94fcbefe78e8f7f866e0e6f743fa4b1ab258b296 (patch)
tree      882ca63e87067b9ef5d94627c8c48ecd160bcc85 /Test/vstte2012
parent    9985eb5e81b1c5c62aceb4edab5d07b1f4f46809 (diff)
Added Dafny solutions to the VSTTE 2012 program verification competition
Diffstat (limited to 'Test/vstte2012')
-rw-r--r--   Test/vstte2012/Answer                   20
-rw-r--r--   Test/vstte2012/BreadthFirstSearch.dfy  276
-rw-r--r--   Test/vstte2012/Combinators.dfy         459
-rw-r--r--   Test/vstte2012/RingBuffer.dfy           93
-rw-r--r--   Test/vstte2012/Tree.dfy                172
-rw-r--r--   Test/vstte2012/Two-Way-Sort.dfy         58
-rw-r--r--   Test/vstte2012/runtest.bat              24
7 files changed, 1102 insertions, 0 deletions
diff --git a/Test/vstte2012/Answer b/Test/vstte2012/Answer
new file mode 100644
index 00000000..15a95de1
--- /dev/null
+++ b/Test/vstte2012/Answer
@@ -0,0 +1,20 @@
+
+-------------------- Two-Way-Sort.dfy --------------------
+
+Dafny program verifier finished with 4 verified, 0 errors
+
+-------------------- Combinators.dfy --------------------
+
+Dafny program verifier finished with 25 verified, 0 errors
+
+-------------------- RingBuffer.dfy --------------------
+
+Dafny program verifier finished with 13 verified, 0 errors
+
+-------------------- Tree.dfy --------------------
+
+Dafny program verifier finished with 15 verified, 0 errors
+
+-------------------- BreadthFirstSearch.dfy --------------------
+
+Dafny program verifier finished with 24 verified, 0 errors
diff --git a/Test/vstte2012/BreadthFirstSearch.dfy b/Test/vstte2012/BreadthFirstSearch.dfy
new file mode 100644
index 00000000..5153b053
--- /dev/null
+++ b/Test/vstte2012/BreadthFirstSearch.dfy
@@ -0,0 +1,276 @@
+class BreadthFirstSearch<Vertex>
+{
+ // The following function is left uninterpreted (for the purpose of the
+ // verification problem, it can be thought of as a parameter to the class)
+ function method Succ(x: Vertex): set<Vertex>
+
+ // This is the definition of what it means to be a path "p" from vertex
+ // "source" to vertex "dest"
+ function IsPath(source: Vertex, dest: Vertex, p: seq<Vertex>): bool
+ {
+ if source == dest then
+ p == []
+ else
+ p != [] && dest in Succ(p[|p|-1]) && IsPath(source, p[|p|-1], p[..|p|-1])
+ }
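+ // As an illustration (hypothetical, assumed-distinct vertices a, b, c with
+ // b in Succ(a) and c in Succ(b)): IsPath(a, c, [a, b]) holds. The path
+ // sequence lists the vertices strictly before "dest", so |p| is the number
+ // of edges followed.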
+
+ function IsClosed(S: set<Vertex>): bool // says that S is closed under Succ
+ {
+ forall v :: v in S ==> Succ(v) <= S
+ }
+
+ // This is the main method to be verified. Note, instead of using a
+ // postcondition that says that there exists a path (such that ...),
+ // this method returns, as a ghost out-parameter, that existential
+ // witness. The method could equally well have been written using an
+ // existential quantifier and no ghost out-parameter.
+ method BFS(source: Vertex, dest: Vertex, ghost AllVertices: set<Vertex>)
+ returns (d: int, ghost path: seq<Vertex>)
+ // source and dest are among AllVertices
+ requires source in AllVertices && dest in AllVertices;
+ // AllVertices is closed under Succ
+ requires IsClosed(AllVertices);
+ // This method has two basic outcomes, as indicated by the sign of "d".
+ // More precisely, "d" is non-negative iff "source" can reach "dest".
+ // The following postcondition says that under the "0 <= d" outcome,
+ // "path" denotes a path of length "d" from "source" to "dest":
+ ensures 0 <= d ==> IsPath(source, dest, path) && |path| == d;
+ // Moreover, that path is as short as any path from "source" to "dest":
+ ensures 0 <= d ==> forall p :: IsPath(source, dest, p) ==> |path| <= |p|;
+ // Finally, under the outcome "d < 0", there is no path from "source" to "dest":
+ ensures d < 0 ==> !exists p :: IsPath(source, dest, p);
+ {
+ var V, C, N := {source}, {source}, {};
+ ghost var Processed, paths := {}, Maplet({source}, source, [], Empty);
+ // V - all encountered vertices
+ // Processed - vertices reachable from "source" in at most "d" steps
+ // C - unprocessed vertices reachable from "source" in "d" steps
+ // (but no less)
+ // N - vertices encountered and reachable from "source" in "d+1" steps
+ // (but no less)
+ d := 0;
+ var dd := Zero;
+ while (C != {})
+ // V, Processed, C, N are all subsets of AllVertices:
+ invariant V <= AllVertices && Processed <= AllVertices && C <= AllVertices && N <= AllVertices;
+ // source is encountered:
+ invariant source in V;
+ // V is the disjoint union of Processed, C, and N:
+ invariant V == Processed + C + N;
+ invariant Processed !! C !! N; // Processed, C, and N are mutually disjoint
+ // "paths" records a path for every encountered vertex
+ invariant ValidMap(source, paths);
+ invariant V == Domain(paths);
+ // shortest paths for vertices in C have length d, and for vertices in N
+ // have length d+1
+ invariant forall x :: x in C ==> |Find(source, x, paths)| == d;
+ invariant forall x :: x in N ==> |Find(source, x, paths)| == d + 1;
+ // "dd" is just an inductive-looking way of writing "d":
+ invariant Value(dd) == d;
+ // "dest" is not reachable for any smaller value of "d":
+ invariant dest in R(source, dd, AllVertices) ==> dest in C;
+ invariant d != 0 ==> dest !in R(source, dd.predecessor, AllVertices);
+ // together, Processed and C are all the vertices reachable in at most d steps:
+ invariant Processed + C == R(source, dd, AllVertices);
+ // N are the successors of Processed that are not reachable within d steps:
+ invariant N == Successors(Processed, AllVertices) - R(source, dd, AllVertices);
+ // if we have exhausted C, then we have also exhausted N:
+ invariant C == {} ==> N == {};
+ // variant:
+ decreases AllVertices - Processed;
+ {
+ // remove a vertex "v" from "C"
+ var v := choose C;
+ C, Processed := C - {v}, Processed + {v};
+ ghost var pathToV := Find(source, v, paths);
+
+ if (v == dest) {
+ parallel (p | IsPath(source, dest, p))
+ ensures |pathToV| <= |p|;
+ {
+ Lemma_IsPath_R(source, dest, p, AllVertices);
+ if (|p| < |pathToV|) {
+ // show that this branch is impossible
+ ToNat_Value_Bijection(|p|);
+ RMonotonicity(source, ToNat(|p|), dd.predecessor, AllVertices);
+ }
+ }
+ return d, pathToV;
+ }
+
+ // process newly encountered successors
+ var newlyEncountered := set w | w in Succ(v) && w !in V;
+ V, N := V + newlyEncountered, N + newlyEncountered;
+ paths := UpdatePaths(newlyEncountered, source, paths, v, pathToV);
+
+ if (C == {}) {
+ C, N, d, dd := N, {}, d+1, Suc(dd);
+ }
+ }
+
+ // show that "dest" is not in any reachability set, no matter
+ // how many successors one follows
+ parallel (nn)
+ ensures dest !in R(source, nn, AllVertices);
+ {
+ if (Value(nn) < Value(dd)) {
+ RMonotonicity(source, nn, dd, AllVertices);
+ } else {
+ IsReachFixpoint(source, dd, nn, AllVertices);
+ }
+ }
+
+ // Now, show what the above means in terms of IsPath. More
+ // precisely, show that there is no path "p" from "source" to "dest".
+ parallel (p | IsPath(source, dest, p))
+ // this and the previous two lines will establish the
+ // absurdity of a "p" satisfying IsPath(source, dest, p)
+ ensures false;
+ {
+ Lemma_IsPath_R(source, dest, p, AllVertices);
+ // a consequence of Lemma_IsPath_R is:
+ // dest in R(source, ToNat(|p|), AllVertices)
+ // but that contradicts the conclusion of the preceding parallel statement
+ }
+
+ d := -1; // indicate "no path"
+ }
+
+ // property of IsPath
+
+ ghost method Lemma_IsPath_Closure(source: Vertex, dest: Vertex,
+ p: seq<Vertex>, AllVertices: set<Vertex>)
+ requires IsPath(source, dest, p) && source in AllVertices && IsClosed(AllVertices);
+ ensures dest in AllVertices && forall v :: v in p ==> v in AllVertices;
+ {
+ if (p != []) {
+ var last := |p| - 1;
+ Lemma_IsPath_Closure(source, p[last], p[..last], AllVertices);
+ }
+ }
+
+ // operations on Nat
+
+ function Value(nn: Nat): nat
+ ensures ToNat(Value(nn)) == nn;
+ {
+ match nn
+ case Zero => 0
+ case Suc(mm) => Value(mm) + 1
+ }
+
+ function ToNat(n: nat): Nat
+ {
+ if n == 0 then Zero else Suc(ToNat(n - 1))
+ }
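+ // For example (an illustrative note): ToNat(2) == Suc(Suc(Zero)) and
+ // Value(Suc(Suc(Zero))) == 2; the lemma below states this round trip
+ // in general.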
+
+ ghost method ToNat_Value_Bijection(n: nat)
+ ensures Value(ToNat(n)) == n;
+ {
+ // Dafny automatically figures out the inductive proof of the postcondition
+ }
+
+ // Reachability
+
+ function R(source: Vertex, nn: Nat, AllVertices: set<Vertex>): set<Vertex>
+ {
+ match nn
+ case Zero => {source}
+ case Suc(mm) => R(source, mm, AllVertices) +
+ Successors(R(source, mm, AllVertices), AllVertices)
+ }
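+ // For example (an illustrative consequence of the definition):
+ // R(source, Suc(Zero), AllVertices) == {source} + Successors({source}, AllVertices),
+ // that is, the set of vertices reachable from "source" in at most one step.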
+
+ function Successors(S: set<Vertex>, AllVertices: set<Vertex>): set<Vertex>
+ {
+ set w | w in AllVertices && exists x :: x in S && w in Succ(x)
+ }
+
+ ghost method RMonotonicity(source: Vertex, mm: Nat, nn: Nat, AllVertices: set<Vertex>)
+ requires Value(mm) <= Value(nn);
+ ensures R(source, mm, AllVertices) <= R(source, nn, AllVertices);
+ decreases Value(nn) - Value(mm);
+ {
+ if (Value(mm) < Value(nn)) {
+ RMonotonicity(source, Suc(mm), nn, AllVertices);
+ }
+ }
+
+ ghost method IsReachFixpoint(source: Vertex, mm: Nat, nn: Nat, AllVertices: set<Vertex>)
+ requires R(source, mm, AllVertices) == R(source, Suc(mm), AllVertices);
+ requires Value(mm) <= Value(nn);
+ ensures R(source, mm, AllVertices) == R(source, nn, AllVertices);
+ decreases Value(nn) - Value(mm);
+ {
+ if (Value(mm) < Value(nn)) {
+ IsReachFixpoint(source, Suc(mm), nn, AllVertices);
+ }
+ }
+
+ ghost method Lemma_IsPath_R(source: Vertex, x: Vertex,
+ p: seq<Vertex>, AllVertices: set<Vertex>)
+ requires IsPath(source, x, p) && source in AllVertices && IsClosed(AllVertices);
+ ensures x in R(source, ToNat(|p|), AllVertices);
+ {
+ if (p != []) {
+ Lemma_IsPath_Closure(source, x, p, AllVertices);
+ var last := |p| - 1;
+ Lemma_IsPath_R(source, p[last], p[..last], AllVertices);
+ }
+ }
+
+ // operations on Map's
+
+ function Domain(m: Map<Vertex>): set<Vertex>
+ {
+// if m.Maplet? then m.dom else {}
+// if m == Empty then {} else assert m.Maplet?; m.dom
+ match m
+ case Empty => {}
+ case Maplet(dom, t, s, nxt) => dom
+ }
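+ // For example (an illustrative instance): Domain(Maplet({v}, v, [], Empty)) == {v},
+ // which matches the shape of the initial map built in method BFS above.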
+
+ // ValidMap encodes the consistency of maps (think, invariant).
+ // This idiom is explained in the README file.
+ function ValidMap(source: Vertex, m: Map<Vertex>): bool
+ {
+ match m
+ case Empty => true
+ case Maplet(dom, v, path, next) =>
+ v in dom && dom == Domain(next) + {v} &&
+ IsPath(source, v, path) &&
+ ValidMap(source, next)
+ }
+
+ function Find(source: Vertex, x: Vertex, m: Map<Vertex>): seq<Vertex>
+ requires ValidMap(source, m) && x in Domain(m);
+ ensures IsPath(source, x, Find(source, x, m));
+ {
+ match m
+ case Maplet(dom, v, path, next) =>
+ if x == v then path else Find(source, x, next)
+ }
+
+ ghost method UpdatePaths(vSuccs: set<Vertex>, source: Vertex,
+ paths: Map<Vertex>, v: Vertex, pathToV: seq<Vertex>)
+ returns (newPaths: Map<Vertex>)
+ requires ValidMap(source, paths);
+ requires vSuccs !! Domain(paths);
+ requires forall succ :: succ in vSuccs ==> IsPath(source, succ, pathToV + [v]);
+ ensures ValidMap(source, newPaths) && Domain(newPaths) == Domain(paths) + vSuccs;
+ ensures forall x :: x in Domain(paths) ==>
+ Find(source, x, paths) == Find(source, x, newPaths);
+ ensures forall x :: x in vSuccs ==> Find(source, x, newPaths) == pathToV + [v];
+ {
+ if (vSuccs == {}) {
+ newPaths := paths;
+ } else {
+ var succ := choose vSuccs;
+ newPaths := Maplet(Domain(paths) + {succ}, succ, pathToV + [v], paths);
+ newPaths := UpdatePaths(vSuccs - {succ}, source, newPaths, v, pathToV);
+ }
+ }
+}
+
+datatype Map<T> = Empty | Maplet(dom: set<T>, T, seq<T>, next: Map<T>);
+
+datatype Nat = Zero | Suc(predecessor: Nat);
diff --git a/Test/vstte2012/Combinators.dfy b/Test/vstte2012/Combinators.dfy
new file mode 100644
index 00000000..46daf48d
--- /dev/null
+++ b/Test/vstte2012/Combinators.dfy
@@ -0,0 +1,459 @@
+// Problem 2 concerns an interpreter for the language of S and K combinators.
+
+// -----------------------------------------------------------------------------
+// Definitions
+
+// First, we define the language of combinator terms. "Apply(x, y)" is what
+// the problem description writes as "(x y)". In the following Dafny
+// definition, "car" and "cdr" are declared to be destructors for terms
+// constructed by Apply.
+
+datatype Term = S | K | Apply(car: Term, cdr: Term);
+
+// The problem defines values to be a subset of the terms. More precisely,
+// a Value is a Term that fits the following grammar:
+// Value = K | S | (K Value) | (S Value) | ((S Value) Value)
+// The following predicate says whether or not a given term is a value.
+
+function method IsValue(t: Term): bool
+ ensures IsValue(t) && t.Apply? ==> IsValue(t.car) && IsValue(t.cdr);
+{
+ match t
+ case K => true
+ case S => true
+ case Apply(a, b) =>
+ match a
+ case K =>
+ assert IsValue(a);
+ IsValue(b)
+ case S =>
+ assert IsValue(a);
+ IsValue(b)
+ case Apply(x, y) =>
+ assert x==S && IsValue(y) && IsValue(b) ==> IsValue(a);
+ x==S && IsValue(y) && IsValue(b)
+}
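+// For example (an illustrative note): IsValue(Apply(Apply(S, K), K)) is true,
+// since ((S K) K) fits the last alternative of the grammar, whereas
+// IsValue(Apply(Apply(K, K), K)) is false, because ((K K) K) is a redex
+// rather than a value.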
+
+// A context is essentially a term with one missing subterm, a "hole". It
+// is defined as follows:
+
+datatype Context = Hole | C_term(Context, Term) | value_C(Term/*Value*/, Context);
+
+// The problem seems to suggest that the value_C form requires a value and
+// a context. To formalize that notion, we define a predicate that checks this
+// condition.
+
+function IsContext(C: Context): bool
+{
+ match C
+ case Hole => true // []
+ case C_term(D, t) => IsContext(D) // (D t)
+ case value_C(v, D) => IsValue(v) && IsContext(D) // (v D)
+}
+
+// The EvalExpr function replaces the hole in a context with a given term.
+
+function EvalExpr(C: Context, t: Term): Term
+ requires IsContext(C);
+{
+ match C
+ case Hole => t
+ case C_term(D, u) => Apply(EvalExpr(D, t), u)
+ case value_C(v, D) => Apply(v, EvalExpr(D, t))
+}
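+// For example (an illustrative note): EvalExpr(C_term(Hole, K), S) == Apply(S, K);
+// plugging the term S into the hole of the context ([] K) yields (S K).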
+
+// A term can be reduced. This reduction operation is defined via
+// a single-step reduction operation. In the problem, the single-step
+// reduction has the form:
+// C[t] --> C[r]
+// We formalize the single-step reduction by a function Step, which
+// performs the reduction if it applies or just returns the given term
+// if it does not. We will say that "Step applies" to refer to the first
+// case, which is a slight abuse of language, since the function "Step"
+// is total.
+//
+// Since the context C is the same on both sides in the single-step
+// reduction above, we don't pass it to function Step. Rather, Step
+// just takes a term "t" and returns the following:
+// match t
+// case ((K v1) v2) => v1
+// case (((S v1) v2) v3) => ((v1 v3) (v2 v3))
+// else t
+// As you can see below, it takes more lines than shown here to express
+// this matching in Dafny, but this is all that Step does.
+//
+// Again, note that Step returns the given term if neither of the two
+// cases in the problem statement applies.
+//
+// One more thing: Step has a postcondition (and the body of Step
+// contains three asserts that act as lemmas in proving the postcondition).
+// The postcondition has been included for the benefit of Verification
+// Task 2, and we describe the functions used in the Step postcondition
+// only much later in this file. For now, the postcondition can be
+// ignored, since it is, after all, just a consequence of the body
+// of Step.
+
+function method Step(t: Term): Term
+ ensures !ContainsS(t) ==>
+ !ContainsS(Step(t)) &&
+ (Step(t) == t || TermSize(Step(t)) < TermSize(t));
+{
+ match t
+ case S => t
+ case K => t
+ case Apply(x, y) =>
+ match x
+ case S => t
+ case K => t
+ case Apply(m, n) =>
+ if m == K && IsValue(n) && IsValue(y) then
+ // this is the case t == Apply(Apply(K, n), y)
+ assert !ContainsS(t) ==> !ContainsS(x);
+ assert TermSize(n) < TermSize(Apply(m, n));
+ n
+ else if m.Apply? && m.car == S && IsValue(m.cdr) && IsValue(n) && IsValue(y) then
+ // t == Apply(Apply(Apply(S, m.cdr), n), y)
+ assert ContainsS(m) && ContainsS(t);
+ Apply(Apply(m.cdr, y), Apply(n, y))
+ else
+ t
+}
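+// Two illustrative example evaluations of Step:
+//   Step(Apply(Apply(K, S), K)) == S,
+//     by the rule ((K v1) v2) => v1 with v1 == S and v2 == K;
+//   Step(Apply(Apply(Apply(S, K), K), S)) == Apply(Apply(K, S), Apply(K, S)),
+//     by the rule (((S v1) v2) v3) => ((v1 v3) (v2 v3)).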
+
+// The single-step reduction operation may be applied to any subexpression
+// of a term that could be considered a hole. Function FindAndStep
+// searches for a (context, term) pair C[u] that denotes a given term "t"
+// such that Step applies to "u". If found, the function returns
+// C[Step(u)], which will necessarily be different from "t". If no such
+// C[u] pair exists, this function returns the given "t".
+//
+// Note, FindAndStep only applies one Step. We will get to repeated
+// applications of steps in the "reduction" method below.
+//
+// For all definitions above, it was necessary to check (manually) that
+// they correspond to the definitions intended in the problem statement.
+// That is, the definitions above are all part of the specification.
+// For function FindAndStep, the definition given does not require
+// such scrutiny. Rather, we will soon state a theorem that states
+// the properties of what FindAndStep returns.
+//
+// Like Step, FindAndStep has a postcondition, and it is also included to
+// support Verification Task 2.
+
+function method FindAndStep(t: Term): Term
+ ensures !ContainsS(t) ==>
+ !ContainsS(FindAndStep(t)) &&
+ (FindAndStep(t) == t || TermSize(FindAndStep(t)) < TermSize(t));
+{
+ if Step(t) != t then
+ Step(t)
+ else if !t.Apply? then
+ t
+ else if FindAndStep(t.car) != t.car then
+ Apply(FindAndStep(t.car), t.cdr)
+ else if IsValue(t.car) && FindAndStep(t.cdr) != t.cdr then
+ Apply(t.car, FindAndStep(t.cdr))
+ else
+ t
+}
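+// For example (illustrative): in the term (K ((K K) K)), Step does not apply
+// at the top level, but it does apply to the subterm ((K K) K), so
+// FindAndStep(Apply(K, Apply(Apply(K, K), K))) == Apply(K, K).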
+
+// One part of the correctness of FindAndStep (and, indeed, of method
+// "reduction" below) is that a term can be terminal, meaning that there is
+// no way to apply Step to any part of it.
+
+function IsTerminal(t: Term): bool
+{
+ !(exists C,u :: IsContext(C) && t == EvalExpr(C,u) && Step(u) != u)
+}
+
+// The following theorem states the correctness of the FindAndStep function:
+
+ghost method Theorem_FindAndStep(t: Term)
+ // If FindAndStep returns the term it started from, then there is no
+ // way to take a step. More precisely, there is no C[u] == t for which the
+ // Step applies to "u".
+ ensures FindAndStep(t) == t ==> IsTerminal(t);
+ // If FindAndStep returns a term that's different from what it started with,
+ // then it chose some C[u] == t for which the Step applies to "u", and then
+ // it returned C[Step(u)].
+ ensures FindAndStep(t) != t ==>
+ exists C,u :: IsContext(C) && t == EvalExpr(C,u) && Step(u) != u &&
+ FindAndStep(t) == EvalExpr(C, Step(u));
+{
+ // The theorem follows from the following lemma, which itself is proved by
+ // induction.
+ var r, C, u := Lemma_FindAndStep(t);
+}
+
+// This is the lemma that proves the theorem above. Whereas the theorem talks
+// existentially about C and u, the lemma constructs C and u and returns them,
+// which is useful in the proof by induction. The computation inside the
+// lemma mimics that done by function FindAndStep; indeed, the lemma
+// computes the value of FindAndStep(t) as it goes along and it returns
+// that value.
+
+ghost method Lemma_FindAndStep(t: Term) returns (r: Term, C: Context, u: Term)
+ ensures r == FindAndStep(t);
+ ensures r == t ==> IsTerminal(t);
+ ensures r != t ==>
+ IsContext(C) && t == EvalExpr(C,u) && Step(u) != u &&
+ r == EvalExpr(C, Step(u));
+{
+ Lemma_ContextPossibilities(t);
+ if (Step(t) != t) {
+ // t == Hole[t] and Step applies to t. So, return Hole[Step(t)]
+ return Step(t), Hole, t;
+ } else if (!t.Apply?) {
+ r := t;
+ } else {
+ r, C, u := Lemma_FindAndStep(t.car); // (*)
+ if (r != t.car) {
+ // t has the form (a b) where a==t.car and b==t.cdr, and a==C[u] for some
+ // context C and some u to which the Step applies. t can therefore be
+ // denoted by (C[u] b) == (C b)[u] and the Step applies to u. So, return
+ // (C b)[Step(u)] == (C[Step(u)] b). Note that FindAndStep(a)
+ // gives C[Step(u)].
+ return Apply(r, t.cdr), C_term(C, t.cdr), u;
+ } else if (IsValue(t.car)) {
+ r, C, u := Lemma_FindAndStep(t.cdr);
+ assert IsTerminal(t.car); // make sure this is still remembered from (*)
+
+ if (r != t.cdr) {
+ // t has the form (a b) where a==t.car and b==t.cdr and "a" is a Value,
+ // and b==C[u] for some context C and some u to which the Step applies.
+ // t can therefore be denoted by (a C[u]) == (C a)[u] and the Step
+ // applies to u. So, return (C a)[Step(u)] == (a C[Step(u)]). Note
+ // that FindAndStep(b) gives C[Step(u)].
+ return Apply(t.car, r), value_C(t.car, C), u;
+ } else {
+ parallel (C,u | IsContext(C) && t == EvalExpr(C,u))
+ ensures Step(u) == u;
+ {
+ // The following assert and the first assert of each "case" are
+ // consequences of the Lemma_ContextPossibilities that was invoked
+ // above.
+ assert t.Apply? && IsValue(t.car);
+ match (C) {
+ case Hole =>
+ assert t == u;
+ case C_term(D, bt) =>
+ assert bt == t.cdr && t.car == EvalExpr(D, u);
+ case value_C(at, D) =>
+ assert at == t.car && t.cdr == EvalExpr(D, u);
+ }
+ }
+ r := t;
+ }
+ } else {
+ r := t;
+ }
+ }
+}
+
+// The proof of the lemma above used one more lemma, namely one that lays out
+// the options for how to represent a term as a C[u] pair.
+
+ghost method Lemma_ContextPossibilities(t: Term)
+ ensures forall C,u :: IsContext(C) && t == EvalExpr(C, u) ==>
+ (C == Hole && t == u) ||
+ (t.Apply? && exists D :: C == C_term(D, t.cdr) && t.car == EvalExpr(D, u)) ||
+ (t.Apply? && IsValue(t.car) &&
+ exists D :: C == value_C(t.car, D) && t.cdr == EvalExpr(D, u));
+{
+ // Dafny's induction tactic rocks
+}
+
+// We now define a way to record a sequence of reduction steps.
+// IsTrace(trace, t, r) returns true iff "trace" gives witness to a
+// sequence of terms from "t" to "r", each term reducing to its
+// successor in the trace.
+
+datatype Trace = EmptyTrace | ReductionStep(Trace, Term);
+
+function IsTrace(trace: Trace, t: Term, r: Term): bool
+{
+ match trace
+ case EmptyTrace =>
+ t == r
+ case ReductionStep(tr, u) =>
+ IsTrace(tr, t, u) && FindAndStep(u) == r
+}
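+// For example (illustrative): for any term t,
+// IsTrace(ReductionStep(EmptyTrace, t), t, FindAndStep(t)) holds, since the
+// empty trace witnesses that t reduces to t and the single ReductionStep
+// records one application of FindAndStep.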
+
+// Finally, we are ready to give the requested routine "reduction", which
+// keeps applying FindAndStep until quiescence, that is, until Step
+// no longer applies.
+//
+// As required by Verification Task 1, the "reduction" method has two
+// postconditions. One says that the term returned, "r", was obtained
+// from the original term, "t", by a sequence of reduction steps. The
+// other says that "r" cannot be reduced any further.
+//
+// Unlike the other competition problems, this one requested code
+// (for "reduction") that may not terminate. In order to allow reasoning
+// about algorithms that may never terminate, Dafny has a special loop
+// statement (a "while" loop with a declaration "decreases *") that
+// thus comes in handy for "reduction". (Dafny never allows recursion
+// to be non-terminating, only these special loops.) Note that use
+// of the special loop statement does not have any effect on the
+// specifications of the enclosing method (but this may change in a
+// future version of Dafny).
+
+method reduction(t: Term) returns (r: Term)
+ // The result was obtained by a sequence of reductions:
+ ensures exists trace :: IsTrace(trace, t, r);
+ // The result "r" cannot be reduced any further:
+ ensures IsTerminal(r);
+{
+ r := t;
+ ghost var trace := EmptyTrace;
+ while (true)
+ invariant IsTrace(trace, t, r);
+ decreases *; // allow this statement to loop forever
+ {
+ var u := FindAndStep(r);
+ if (u == r) {
+ // we have found a fixpoint
+ Theorem_FindAndStep(r);
+ return;
+ }
+ r, trace := u, ReductionStep(trace, r);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Verification Task 2
+//
+// This part of the problem asks us to consider the reduction of terms that
+// do not contain S. The following function formalizes what it means for a term
+// to contain S:
+
+function method ContainsS(t: Term): bool
+{
+ match t
+ case S => true
+ case K => false
+ case Apply(x, y) => ContainsS(x) || ContainsS(y)
+}
+
+// The verification task itself is to prove that "reduction" terminates on any
+// term that does not contain S. To prove this, we need to supply a loop variant
+// for the loop in "reduction". However, Dafny does not allow one loop to be
+// proved to terminate in some cases and be allowed not to terminate in others.
+// Therefore, we meet Verification Task 2 by manually copying the body of "reduction"
+// into a new method (called VerificationTask2) and proving that this new method
+// terminates. Of course, Dafny does not check that we copy the body correctly,
+// so that needs to be checked by a human.
+//
+// In method VerificationTask2, we added not just the precondition given in the
+// Verification Task and a loop variant, but we also added two loop invariants
+// and one more postcondition. One of the loop invariants keep track of that
+// there are no S's. The other loop invariant and the postcondition are for
+// the benefit of Verification Task 3, as we explain later.
+
+method VerificationTask2(t: Term) returns (r: Term)
+ requires !ContainsS(t); // a sufficient condition for termination
+ // The result was obtained by a sequence of reductions:
+ ensures exists trace :: IsTrace(trace, t, r);
+ // The result "r" cannot be reduced any further:
+ ensures IsTerminal(r);
+ // Later in this file, we define a function TerminatingReduction, and the
+ // following postcondition says that TerminatingReduction computes the same
+ // term as this method does.
+ ensures r == TerminatingReduction(t);
+{
+ r := t;
+ ghost var trace := EmptyTrace;
+ while (true)
+ invariant IsTrace(trace, t, r) && !ContainsS(r);
+ invariant TerminatingReduction(t) == TerminatingReduction(r);
+ decreases TermSize(r);
+ {
+ var u := FindAndStep(r);
+ if (u == r) {
+ // we have found a fixpoint
+ Theorem_FindAndStep(r);
+ return;
+ }
+ r, trace := u, ReductionStep(trace, r);
+ }
+}
+
+// What now follows is the definition of TermSize, which is used in the
+// loop variant. When a Step is applied to a term without S, TermSize
+// is reduced, which is stated as a postcondition of both Step and
+// FindAndStep. That postcondition of FindAndStep is used in the
+// proof of termination of method VerificationTask2.
+
+// The loop variant is simply the count of nodes in the term:
+
+function TermSize(t: Term): nat
+{
+ match t
+ case S => 1
+ case K => 1
+ case Apply(x, y) => 1 + TermSize(x) + TermSize(y)
+}
+
+// We have already given two methods for computing a reduction:
+// method "reduction", which may or may not terminate, and method
+// "VerificationTask2", whose precondition is strong enough to let
+// us prove that the method will terminate. The correspondence
+// between the two methods is checked by hand, seeing that
+// VerificationTask2 includes the postconditions of "reduction" and
+// seeing that the code is the same.
+//
+// We now define a third way of computing reductions, this time
+// using a function (not a method). To prove that this function
+// computes the same thing as method VerificationTask2, we had
+// added a postcondition to VerificationTask2 above. This function
+// is introduced for the benefit of stating and verifying Verification
+// Task 3.
+
+function TerminatingReduction(t: Term): Term
+ requires !ContainsS(t); // a sufficient condition for termination
+ decreases TermSize(t);
+{
+ if FindAndStep(t) == t then
+ t // we have reached a fixpoint
+ else
+ TerminatingReduction(FindAndStep(t))
+}
+
+// -----------------------------------------------------------------------------
+// Verification Task 3
+
+// Here is the function "ks" from Verification Task 3. It produces a particular
+// family of terms that contain only Apply and K. Hence, we can establish, as a
+// postcondition of the function, that ks(n) does not contain S.
+
+function method ks(n: nat): Term
+ ensures !ContainsS(ks(n));
+{
+ if n == 0 then K else Apply(ks(n-1), K)
+}
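+// For example (illustrative): ks(2) == Apply(Apply(K, K), K), that is,
+// ((K K) K), and a single FindAndStep reduces it to ks(0) == K, an instance
+// of the assertion in method VT3 below.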
+
+// Verification Task 3 is now established by the following theorem. It says
+// that reducing ks(n) results in either K or (K K), depending on the parity
+// of n. The theorem uses function TerminatingReduction to speak of the
+// reduction--remember that (by the last postcondition of method
+// VerificationTask2) it computes the same thing as method VerificationTask2
+// does.
+
+ghost method VerificationTask3()
+ ensures forall n: nat ::
+ TerminatingReduction(ks(n)) == if n % 2 == 0 then K else Apply(K, K);
+{
+ parallel (n: nat) {
+ VT3(n);
+ }
+}
+
+ghost method VT3(n: nat)
+ ensures TerminatingReduction(ks(n)) == if n % 2 == 0 then K else Apply(K, K);
+{
+ // Dafny's (way cool) induction tactic kicks in and proves the following
+ // assertion automatically:
+ assert forall p :: 2 <= p ==> FindAndStep(ks(p)) == ks(p-2);
+ // And then Dafny's (cool beyond words) induction tactic for ghost methods kicks
+ // in to prove the postcondition. (If this got you curious, scope out Leino's
+ // VMCAI 2012 paper "Automating Induction with an SMT Solver".)
+}
diff --git a/Test/vstte2012/RingBuffer.dfy b/Test/vstte2012/RingBuffer.dfy
new file mode 100644
index 00000000..5262e462
--- /dev/null
+++ b/Test/vstte2012/RingBuffer.dfy
@@ -0,0 +1,93 @@
+class RingBuffer<T>
+{
+ // public fields that are used to define the abstraction:
+ ghost var Contents: seq<T>; // the contents of the ring buffer
+ ghost var N: nat; // the capacity of the ring buffer
+ ghost var Repr: set<object>; // the set of objects used in the implementation
+
+ // Valid encodes the consistency of RingBuffer objects (think, invariant).
+ // This idiom is explained in the README file.
+ function Valid(): bool
+ reads this, Repr;
+ {
+ this in Repr && null !in Repr &&
+ data != null && data in Repr &&
+ data.Length == N &&
+ (N == 0 ==> len == first == 0 && Contents == []) &&
+ (N != 0 ==> len <= N && first < N) &&
+ Contents == if first + len <= N then data[first..first+len]
+ else data[first..] + data[..first+len-N]
+ }
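+ // For example (illustrative values): with N == 3, data == [c, a, b],
+ // first == 1, and len == 3, the wraparound case applies and
+ // Contents == data[1..] + data[..1] == [a, b] + [c] == [a, b, c].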
+
+ // private implementation:
+ var data: array<T>;
+ var first: nat;
+ var len: nat;
+
+ constructor Create(n: nat)
+ modifies this;
+ ensures Valid() && fresh(Repr - {this});
+ ensures Contents == [] && N == n;
+ {
+ Repr := {this};
+ data := new T[n];
+ Repr := Repr + {data};
+ first, len := 0, 0;
+ Contents, N := [], n;
+ }
+
+ method Clear()
+ requires Valid();
+ modifies Repr;
+ ensures Valid() && fresh(Repr - old(Repr));
+ ensures Contents == [] && N == old(N);
+ {
+ len := 0;
+ Contents := [];
+ }
+
+ method Head() returns (x: T)
+ requires Valid();
+ requires Contents != [];
+ ensures x == Contents[0];
+ {
+ x := data[first];
+ }
+
+ method Push(x: T)
+ requires Valid();
+ requires |Contents| != N;
+ modifies Repr;
+ ensures Valid() && fresh(Repr - old(Repr));
+ ensures Contents == old(Contents) + [x] && N == old(N);
+ {
+ var nextEmpty := if first + len < data.Length
+ then first + len else first + len - data.Length;
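+ // (Illustrative values: if data.Length == 3, first == 2, and len == 1,
+ // then first + len == 3 is not less than data.Length, so nextEmpty
+ // wraps around to 0.)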
+ data[nextEmpty] := x;
+ len := len + 1;
+ Contents := Contents + [x];
+ }
+
+ method Pop() returns (x: T)
+ requires Valid();
+ requires Contents != [];
+ modifies Repr;
+ ensures Valid() && fresh(Repr - old(Repr));
+ ensures x == old(Contents)[0] && Contents == old(Contents)[1..] && N == old(N);
+ {
+ x := data[first];
+ first, len := if first + 1 == data.Length then 0 else first + 1, len - 1;
+ Contents := Contents[1..];
+ }
+}
+
+method TestHarness(x: int, y: int, z: int)
+{
+ var b := new RingBuffer<int>.Create(2);
+ b.Push(x);
+ b.Push(y);
+ var h := b.Pop(); assert h == x;
+ b.Push(z);
+ h := b.Pop(); assert h == y;
+ h := b.Pop(); assert h == z;
+}
diff --git a/Test/vstte2012/Tree.dfy b/Test/vstte2012/Tree.dfy
new file mode 100644
index 00000000..47ed19a4
--- /dev/null
+++ b/Test/vstte2012/Tree.dfy
@@ -0,0 +1,172 @@
+// The tree datatype
+datatype Tree = Leaf | Node(Tree, Tree);
+
+
+// This datatype is used for the result of the build functions.
+// These functions either fail or yield a tree. Since we use
+// a side-effect free implementation of the helper
+// build_rec, it also has to yield the rest of the sequence,
+// which still needs to be processed. For function build,
+// this part is not used.
+datatype Result = Fail | Res(t: Tree, sOut: seq<int>);
+
+
+// Function toList converts a tree to a sequence.
+// We use Dafny's built-in value type sequence rather than
+// an imperative implementation to simplify verification.
+// The argument d is added to each element of the sequence;
+// it is analogous to the argument d in build_rec.
+// The postconditions state properties that are needed
+// in the completeness proof.
+function toList(d: int, t: Tree): seq<int>
+ ensures toList(d, t) != [] && toList(d, t)[0] >= d;
+ ensures (toList(d, t)[0] == d) == (t == Leaf);
+ decreases t;
+{
+ match t
+ case Leaf => [d]
+ case Node(l, r) => toList(d+1, l) + toList(d+1, r)
+}
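+// For example (illustrative): toList(0, Node(Leaf, Leaf)) == [1, 1], since
+// each Leaf contributes a singleton sequence holding its depth d+1 == 1.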
+
+
+// Function build_rec is a side-effect free implementation
+// of the code given in the problem statement.
+// The first postcondition is needed to show that the
+// termination measure indeed decreases.
+// The second postcondition specifies the soundness
+// property; converting the resulting tree back into a
+// sequence yields exactly that portion of the input
+// sequence that has been consumed.
+function method build_rec(d: int, s: seq<int>): Result
+ ensures build_rec(d, s).Res? ==>
+ |build_rec(d, s).sOut| < |s| &&
+ build_rec(d, s).sOut == s[|s|-|build_rec(d, s).sOut|..];
+ ensures build_rec(d, s).Res? ==>
+ toList(d,build_rec(d, s).t) == s[..|s|-|build_rec(d, s).sOut|];
+ decreases |s|, (if s==[] then 0 else s[0]-d);
+{
+ if s==[] || s[0] < d then
+ Fail
+ else if s[0] == d then
+ Res(Leaf, s[1..])
+ else
+ var left := build_rec(d+1, s);
+ if left.Fail? then Fail else
+ var right := build_rec(d+1, left.sOut);
+ if right.Fail? then Fail else
+ Res(Node(left.t, right.t), right.sOut)
+}
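+// For example (illustrative): build_rec(0, [1, 1]) == Res(Node(Leaf, Leaf), []),
+// via the recursive calls build_rec(1, [1, 1]) == Res(Leaf, [1]) and
+// build_rec(1, [1]) == Res(Leaf, []).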
+
+
+// Function build is a side-effect free implementation
+// of the code given in the problem statement.
+// The postcondition specifies the soundness property;
+// converting the resulting tree back into a
+// sequence yields exactly the input sequence.
+// Completeness is proved as a lemma, see below.
+function method build(s: seq<int>): Result
+ ensures build(s).Res? ==> toList(0,build(s).t) == s;
+{
+ var r := build_rec(0, s);
+ if r.Res? && r.sOut == [] then r else Fail
+}
+
+
+// This ghost method encodes the main lemma for the
+// completeness theorem. If a sequence s starts with a
+// valid encoding of a tree t then build_rec yields a
+// result (i.e., does not fail) and the rest of the sequence.
+// The body of the method proves the lemma by structural
+// induction on t. Dafny proves termination (using the
+// height of the term t as termination measure), which
+// ensures that the induction hypothesis is applied
+// correctly (encoded by calls to this ghost method).
+ghost method lemma0(t: Tree, d: int, s: seq<int>)
+ ensures build_rec(d, toList(d, t) + s).Res? &&
+ build_rec(d, toList(d, t) + s).sOut == s;
+{
+ match(t) {
+ case Leaf =>
+ assert toList(d, t) == [d];
+ case Node(l, r) =>
+ assert toList(d, t) + s == toList(d+1, l) + (toList(d+1, r) + s);
+
+ lemma0(l, d+1, toList(d+1, r) + s); // apply the induction hypothesis
+ lemma0(r, d+1, s); // apply the induction hypothesis
+ }
+}
+
+
+// This ghost method encodes a lemma that states the
+// completeness property. It is proved by applying the
+// main lemma (lemma0). In this lemma, the bound variables
+// of the completeness theorem are passed as arguments;
+// the following two ghost methods replace these arguments
+// by quantified variables.
+ghost method lemma1(t: Tree, s:seq<int>)
+ requires s == toList(0, t) + [];
+ ensures build(s).Res?;
+{
+ lemma0(t, 0, []);
+}
+
+
+// This ghost method encodes a lemma that introduces the
+// existential quantifier in the completeness property.
+ghost method lemma2(s: seq<int>)
+ ensures (exists t: Tree :: toList(0,t) == s) ==> build(s).Res?;
+{
+ parallel(t | toList(0,t) == s) {
+ lemma1(t, s);
+ }
+}
+
+
+// This ghost method encodes the completeness theorem.
+// For each sequence for which there is a corresponding
+// tree, function build yields a result different from Fail.
+// The body of the method converts the argument of lemma2
+// into a universally quantified variable.
+ghost method completeness()
+ ensures forall s: seq<int> :: ((exists t: Tree :: toList(0,t) == s) ==> build(s).Res?);
+{
+ parallel(s) {
+ lemma2(s);
+ }
+}
+
+
+// This method encodes the first test harness
+// given in the problem statement. The local
+// assertions are required by the verifier to
+// unfold the necessary definitions.
+method harness0()
+ ensures build([1,3,3,2]).Res? &&
+ build([1,3,3,2]).t == Node(Leaf, Node(Node(Leaf, Leaf), Leaf));
+{
+ assert build_rec(2, [2]) ==
+ Res(Leaf, []);
+ assert build_rec(2, [3,3,2]) ==
+ Res(Node(Leaf, Leaf), [2]);
+ assert build_rec(1, [3,3,2]) ==
+ Res(Node(Node(Leaf, Leaf), Leaf), []);
+ assert build_rec(1, [1,3,3,2]) ==
+ Res(Leaf, [3,3,2]);
+ assert build_rec(0, [1,3,3,2]) ==
+ Res(
+ Node(build_rec(1, [1,3,3,2]).t,
+ build_rec(1, [3,3,2]).t),
+ []);
+}
+
+
+// This method encodes the second test harness
+// given in the problem statement. The local
+// assertions are required by the verifier to
+// unfold the necessary definitions.
+method harness1()
+ ensures build([1,3,2,2]).Fail?;
+{
+ assert build_rec(3, [2,2]).Fail?;
+ assert build_rec(1, [3,2,2]).Fail?;
+}
diff --git a/Test/vstte2012/Two-Way-Sort.dfy b/Test/vstte2012/Two-Way-Sort.dfy
new file mode 100644
index 00000000..49ff29b4
--- /dev/null
+++ b/Test/vstte2012/Two-Way-Sort.dfy
@@ -0,0 +1,58 @@
+// This method is a slight generalization of the
+// code provided in the problem statement since it
+// is generic in the type of the array elements.
+method swap<T>(a: array<T>, i: int, j: int)
+ requires a != null;
+ requires 0 <= i < j < a.Length;
+ modifies a;
+ ensures a[i] == old(a[j]);
+ ensures a[j] == old(a[i]);
+ ensures forall m :: 0 <= m < a.Length && m != i && m != j ==> a[m] == old(a[m]);
+ ensures multiset(a[..]) == old(multiset(a[..]));
+{
+ var t := a[i];
+ a[i] := a[j];
+ a[j] := t;
+}
+
+// This method is a direct translation of the pseudo
+// code given in the problem statement.
+// The first postcondition expresses that the resulting
+// array is sorted, that is, all occurrences of "false"
+// come before all occurrences of "true".
+// The second postcondition expresses that the post-state
+// array is a permutation of the pre-state array. To express
+// this, we use Dafny's built-in multisets. The built-in
+// function "multiset" takes an array and yields the
+// multiset of the array elements.
+// Note that Dafny guesses a suitable ranking function
+// for the termination proof of the while loop.
+// We use the loop guard from the given pseudo-code. However,
+// the program also verifies with the stronger guard "i < j"
+// (without changing any of the other specifications or
+// annotations).
+method two_way_sort(a: array<bool>)
+ requires a != null;
+ modifies a;
+ ensures forall m,n :: 0 <= m < n < a.Length ==> (!a[m] || a[n]);
+ ensures multiset(a[..]) == old(multiset(a[..]));
+{
+ var i := 0;
+ var j := a.Length - 1;
+ while (i <= j)
+ invariant 0 <= i <= j + 1 <= a.Length;
+ invariant forall m :: 0 <= m < i ==> !a[m];
+ invariant forall n :: j < n < a.Length ==> a[n];
+ invariant multiset(a[..]) == old(multiset(a[..]));
+ {
+ if (!a[i]) {
+ i := i+1;
+ } else if (a[j]) {
+ j := j-1;
+ } else {
+ swap(a, i, j);
+ i := i+1;
+ j := j-1;
+ }
+ }
+}
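+// A small worked example (illustrative): on a == [true, false], the loop body
+// runs once with i == 0 and j == 1; neither !a[i] nor a[j] holds, so
+// swap(a, 0, 1) yields [false, true], after which i == 1, j == 0, and the
+// guard fails with the array sorted.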
diff --git a/Test/vstte2012/runtest.bat b/Test/vstte2012/runtest.bat
new file mode 100644
index 00000000..07b5859e
--- /dev/null
+++ b/Test/vstte2012/runtest.bat
@@ -0,0 +1,24 @@
+@echo off
+setlocal
+
+set BOOGIEDIR=..\..\Binaries
+set DAFNY_EXE=%BOOGIEDIR%\Dafny.exe
+set CSC=c:/Windows/Microsoft.NET/Framework/v4.0.30319/csc.exe
+
+for %%f in (
+ Two-Way-Sort.dfy
+ Combinators.dfy
+ RingBuffer.dfy
+ Tree.dfy
+ BreadthFirstSearch.dfy
+ ) do (
+ echo.
+ echo -------------------- %%f --------------------
+
+ REM The following line will just run the verifier
+ IF "%COMPILEDAFNY%"=="" %DAFNY_EXE% /compile:0 /dprint:out.dfy.tmp %* %%f
+
+ REM Alternatively, the following lines also produce C# code and compile it
+ IF NOT "%COMPILEDAFNY%"=="" %DAFNY_EXE% %* %%f
+ IF NOT "%COMPILEDAFNY%"=="" %CSC% /nologo /debug /t:library /out:out.dll /r:System.Numerics.dll out.cs
+)