From 0e1252d5a6330570df698df924a0554b688042e8 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sat, 8 Mar 2014 05:06:22 -0500 Subject: Identifies tables read or touched by queries. --- src/compiler.sig | 8 ++++-- src/compiler.sml | 19 +++++++++---- src/sql.sml | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 99 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/compiler.sig b/src/compiler.sig index fa131cf4..df567441 100644 --- a/src/compiler.sig +++ b/src/compiler.sig @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -122,6 +122,7 @@ signature COMPILER = sig val pathcheck : (Mono.file, Mono.file) phase val sidecheck : (Mono.file, Mono.file) phase val sigcheck : (Mono.file, Mono.file) phase + val sqlcache : (Mono.file, Mono.file) phase val cjrize : (Mono.file, Cjr.file) phase val prepare : (Cjr.file, Cjr.file) phase val checknest : (Cjr.file, Cjr.file) phase @@ -137,12 +138,12 @@ signature COMPILER = sig val toCorify : (string, Core.file) transform val toCore_untangle : (string, Core.file) transform val toShake1 : (string, Core.file) transform - val toEspecialize1' : (string, Core.file) transform + val toEspecialize1' : (string, Core.file) transform val toShake1' : (string, Core.file) transform val toRpcify : (string, Core.file) transform val toCore_untangle2 : (string, Core.file) transform val toShake2 : (string, Core.file) transform - val toEspecialize1 : (string, Core.file) transform + val toEspecialize1 : (string, Core.file) transform val toCore_untangle3 : (string, Core.file) transform val toShake3 : (string, Core.file) transform val toTag : (string, Core.file) transform @@ -186,6 +187,7 @@ signature COMPILER = sig val toPathcheck : (string, Mono.file) transform val toSidecheck : (string, Mono.file) transform val toSigcheck : (string, Mono.file) transform + val toSqlcache : (string, Mono.file) transform val toCjrize : (string, Cjr.file) transform val toPrepare : (string, Cjr.file) transform val toChecknest : (string, Cjr.file) transform diff --git a/src/compiler.sml b/src/compiler.sml index cc4e33c5..36a1b03f 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -25,7 +25,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*) -structure Compiler :> COMPILER = struct +structure Compiler :> COMPILER = struct structure UrwebLrVals = UrwebLrValsFn(structure Token = LrParser.Token) structure Lex = UrwebLexFn(structure Tokens = UrwebLrVals.Tokens) @@ -268,7 +268,7 @@ val parseUr = { | _ => absyn end handle LrParser.ParseError => [], - print = SourcePrint.p_file} + print = SourcePrint.p_file} fun p_job ({prefix, database, exe, sql, sources, debug, profile, timeout, ffi, link, headers, scripts, @@ -1090,7 +1090,7 @@ val parse = { ErrorMsg.error ("Rooted module " ^ full ^ " has multiple versions.") else (); - + makeD true "" pieces before ignore (foldl (fn (new, path) => let @@ -1438,12 +1438,19 @@ val sigcheck = { val toSigcheck = transform sigcheck "sigcheck" o toSidecheck +val sqlcache = { + func = (fn file => (Sql.go file; file)), + print = MonoPrint.p_file MonoEnv.empty +} + +val toSqlcache = transform sqlcache "sqlcache" o toSigcheck + val cjrize = { func = Cjrize.cjrize, print = CjrPrint.p_file CjrEnv.empty } -val toCjrize = transform cjrize "cjrize" o toSigcheck +val toCjrize = transform cjrize "cjrize" o toSqlcache val prepare = { func = Prepare.prepare, @@ -1596,7 +1603,7 @@ fun compile job = compileC {cname = cname, oname = oname, ename = ename, libs = libs, profile = #profile job, debug = #debug job, linker = #linker job, link = #link job} - + before cleanup ()) end handle ex => (((cleanup ()) handle _ => ()); raise ex) diff --git a/src/sql.sml b/src/sql.sml index c314eb3d..601b3510 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -2,7 +2,7 @@ structure Sql = struct open Mono -val debug = ref false +val debug = ref true (*false*) type lvar = int @@ -238,7 +238,7 @@ fun string chs = end else NONE - | _ => NONE + | _ => NONE val prim = altL [wrap (follow (wrapP (follow (keep Char.isDigit) (follow (const ".") (keep Char.isDigit))) @@ -267,7 +267,7 @@ fun sqlify chs = ((PCon (_, PConFfi {mod = "Basis", con = "False", ...}, NONE), _), (EPrim (Prim.String "FALSE"), _))], _), _) :: chs => SOME (e, chs) - + | _ => NONE fun constK s = wrap (const s) (fn () => s) @@ -317,7 +317,7 @@ fun sqexp chs = and known chs = wrap (follow known' (follow (const "(") (follow sqexp (const ")")))) (fn ((), ((), (e, ()))) => e) chs - + and func chs = wrap (follow funcName (follow (const "(") (follow sqexp (const ")")))) (fn (f, ((), (e, ()))) => (f, e)) chs @@ -425,4 +425,81 @@ datatype querydml = val querydml = log "querydml" (altL [wrap dml Dml, wrap query Query]) +(* New stuff. 
*) + +fun subExps' (exp' : Mono.exp') = + case exp' of + ECon (_,_,SOME exp) => [exp] + | ESome (_,exp) => [exp] + | EFfiApp (_,_,xs) => map #1 xs + | EApp (exp1,exp2) => [exp1, exp2] + | EAbs (_,_,_,exp) => [exp] + | EUnop (_,exp) => [exp] + | EBinop (_,_,exp1,exp2) => [exp1, exp2] + | ERecord xs => map #2 xs + | EField (exp,_) => [exp] + | ECase (exp,xs,_) => exp :: map #2 xs + | EStrcat (exp1,exp2) => [exp1,exp2] + | EError (exp,_) => [exp] + | EReturnBlob {blob=NONE, mimeType, ...} => [mimeType] + | EReturnBlob {blob=SOME exp, mimeType, ...} => [exp, mimeType] + | ERedirect (exp,_) => [exp] + | EWrite exp => [exp] + | ESeq (exp1,exp2) => [exp1, exp2] + | ELet (_,_,exp1,exp2) => [exp1, exp2] + | EClosure (_,xs) => xs + | EQuery {query, body, initial, ...} => [query, body, initial] + | EDml (exp,_) => [exp] + | ENextval exp => [exp] + | ESetval (exp1,exp2) => [exp1, exp2] + | EUnurlify (exp,_,_) => [exp] + | EJavaScript (_,exp) => [exp] + | ESignalReturn exp => [exp] + | ESignalBind (exp1,exp2) => [exp1, exp2] + | ESignalSource exp => [exp] + | EServerCall (exp,_,_,_) => [exp] + | ERecv (exp,_) => [exp] + | ESleep exp => [exp] + | ESpawn exp => [exp] + | _ => [] + +val subExps : Mono.exp -> Mono.exp list = subExps' o #1 + +fun println str = print (str ^ "\n") +fun printlnExp exp = (Print.print (MonoPrint.p_exp MonoEnv.empty exp); println "") + +fun tablesRead (Query1 {From=tablePairs, ...}) = map #1 tablePairs + | tablesRead (Union (q1,q2)) = tablesRead q1 @ tablesRead q2 + +fun tableTouched (Insert (tab,_)) = tab + | tableTouched (Delete (tab,_)) = tab + | tableTouched (Update (tab,_,_)) = tab + +fun goExp (exp : Mono.exp) = + case #1 exp of + EQuery {query=e, ...} => ( + case parse query e of + SOME q => println ("Query reads from " ^ String.concatWith ", " (tablesRead q)) + | NONE => println "Couldn't parse query"; + printlnExp exp; println "") + | EDml (e,_) => ( + case parse dml e of + SOME d => println ("DML touches " ^ tableTouched d) + | NONE => println "Couldn't parse DML"; + printlnExp exp; println "") + | ENextval _ => (printlnExp exp; println "") + | ESetval _ => (printlnExp exp; println "") + (* Recurse down the syntax tree. *) + | _ => app goExp (subExps exp) + +fun goDecl (decl : decl) = + case #1 decl of + DVal (_,_,_,exp,_) => goExp exp + | DValRec xs => app (goExp o #4) xs + | _ => () + +fun goFile (file : file) = app goDecl (#1 file) + +fun go file = (println "Doing SQL analysis.\n"; goFile file; ()) + end -- cgit v1.2.3 From 93d6de491838eb3607a12686bfdc250366aa60e4 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 25 Mar 2014 02:04:06 -0400 Subject: ML half of initial prototype. (Doesn't compile because there's no C yet.) 
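
The caching scheme this sets up: each read-only, parameterless handler gets its own cache slot plus generated Cache.check/Cache.store FFI calls, and a handler that writes a table gets Cache.flush calls for every slot whose query reads that table. The check/store/flush side will live in generated C; as a rough, self-contained model of the behavior those calls are meant to have (the names and the simple option ref below are illustrative only, not the real FFI):

    (* One slot per read-only handler; NONE means "not cached yet". *)
    val cache0 : string option ref = ref NONE
    fun check0 () = !cache0                        (* SOME page: serve it as is *)
    fun store0 (page : string) = cache0 := SOME page
    fun flush0 () = cache0 := NONE                 (* called by writing handlers *)

    (* Shape of a wrapped handler: run the original body only on a miss. *)
    fun cached0 (handler : unit -> string) () =
        case check0 () of
            SOME page => page
          | NONE => let val page = handler () in store0 page; page end
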
--- caching-tests/test.ur | 81 ++++++++++++++++++++++ caching-tests/test.urp | 6 ++ caching-tests/test.urs | 6 ++ src/compiler.sig | 4 +- src/compiler.sml | 8 +-- src/multimap_fn.sml | 14 ++++ src/sql.sig | 79 ++++++++++++++++++++++ src/sql.sml | 79 +--------------------- src/sql_cache.sml | 179 +++++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 372 insertions(+), 84 deletions(-) create mode 100644 caching-tests/test.ur create mode 100644 caching-tests/test.urp create mode 100644 caching-tests/test.urs create mode 100644 src/multimap_fn.sml create mode 100644 src/sql.sig create mode 100644 src/sql_cache.sml (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur new file mode 100644 index 00000000..4703e229 --- /dev/null +++ b/caching-tests/test.ur @@ -0,0 +1,81 @@ +table foo01 : {Id : int, Bar : string} PRIMARY KEY Id +table foo10 : {Id : int, Bar : string} PRIMARY KEY Id + +(* val query = (SELECT * FROM foo WHERE foo.Bar = "baz") *) +(* val insert = (INSERT INTO foo (Id, Bar) VALUES (42, "baz")) *) + +fun flush01 () : transaction page= + dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz")); + return + + + Flushed 1! + + + +fun flush10 () : transaction page= + dml (INSERT INTO foo10 (Id, Bar) VALUES (42, "baz")); + return + + + Flushed 2! + + + +fun flush11 () : transaction page= + dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz")); + dml (INSERT INTO foo10 (Id, Bar) VALUES (42, "baz")); + return + + + Flushed 1 and 2! + + + +fun cache01 () : transaction page = + res <- oneOrNoRows (SELECT foo01.Id, foo01.Bar + FROM foo01 + WHERE foo01.Bar = "baz"); + return + + + Reading 1. + {case res of + None => + | Some row => {[row.Foo01.Bar]}} + + + +fun cache10 () : transaction page = + res <- oneOrNoRows (SELECT foo10.Id, foo10.Bar + FROM foo10 + WHERE foo10.Bar = "baz"); + return + + + Reading 2. + {case res of + None => + | Some row => {[row.Foo10.Bar]}} + + + +fun cache11 () : transaction page = + res <- oneOrNoRows (SELECT foo01.Id, foo01.Bar + FROM foo01 + WHERE foo01.Bar = "baz"); + bla <- oneOrNoRows (SELECT foo10.Id, foo10.Bar + FROM foo10 + WHERE foo10.Bar = "baz"); + return + + + Reading 1 and 2. 
+ {case res of + None => + | Some row => {[row.Foo01.Bar]}} + {case bla of + None => + | Some row => {[row.Foo10.Bar]}} + + diff --git a/caching-tests/test.urp b/caching-tests/test.urp new file mode 100644 index 00000000..e5111220 --- /dev/null +++ b/caching-tests/test.urp @@ -0,0 +1,6 @@ +database dbname=test +safeGet Test/flush01 +safeGet Test/flush10 +safeGet Test/flush11 + +test diff --git a/caching-tests/test.urs b/caching-tests/test.urs new file mode 100644 index 00000000..ce7d0350 --- /dev/null +++ b/caching-tests/test.urs @@ -0,0 +1,6 @@ +val cache01 : unit -> transaction page +val cache10 : unit -> transaction page +val cache11 : unit -> transaction page +val flush01 : unit -> transaction page +val flush10 : unit -> transaction page +val flush11 : unit -> transaction page diff --git a/src/compiler.sig b/src/compiler.sig index df567441..a0a653a7 100644 --- a/src/compiler.sig +++ b/src/compiler.sig @@ -122,7 +122,7 @@ signature COMPILER = sig val pathcheck : (Mono.file, Mono.file) phase val sidecheck : (Mono.file, Mono.file) phase val sigcheck : (Mono.file, Mono.file) phase - val sqlcache : (Mono.file, Mono.file) phase + val sqlCache : (Mono.file, Mono.file) phase val cjrize : (Mono.file, Cjr.file) phase val prepare : (Cjr.file, Cjr.file) phase val checknest : (Cjr.file, Cjr.file) phase @@ -187,7 +187,7 @@ signature COMPILER = sig val toPathcheck : (string, Mono.file) transform val toSidecheck : (string, Mono.file) transform val toSigcheck : (string, Mono.file) transform - val toSqlcache : (string, Mono.file) transform + val toSqlCache : (string, Mono.file) transform val toCjrize : (string, Cjr.file) transform val toPrepare : (string, Cjr.file) transform val toChecknest : (string, Cjr.file) transform diff --git a/src/compiler.sml b/src/compiler.sml index 36a1b03f..de10d8c8 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -1438,19 +1438,19 @@ val sigcheck = { val toSigcheck = transform sigcheck "sigcheck" o toSidecheck -val sqlcache = { - func = (fn file => (Sql.go file; file)), +val sqlCache = { + func = SqlCache.go, print = MonoPrint.p_file MonoEnv.empty } -val toSqlcache = transform sqlcache "sqlcache" o toSigcheck +val toSqlCache = transform sqlCache "sqlCache" o toSigcheck val cjrize = { func = Cjrize.cjrize, print = CjrPrint.p_file CjrEnv.empty } -val toCjrize = transform cjrize "cjrize" o toSqlcache +val toCjrize = transform cjrize "cjrize" o toSqlCache val prepare = { func = Prepare.prepare, diff --git a/src/multimap_fn.sml b/src/multimap_fn.sml new file mode 100644 index 00000000..585b741f --- /dev/null +++ b/src/multimap_fn.sml @@ -0,0 +1,14 @@ +functor MultimapFn (structure KeyMap : ORD_MAP structure ValSet : ORD_SET) = struct + type key = KeyMap.Key.ord_key + type item = ValSet.item + type items = ValSet.set + type multimap = ValSet.set KeyMap.map + fun inserts (kToVs : multimap, k : key, vs : items) : multimap = + KeyMap.unionWith ValSet.union (kToVs, KeyMap.singleton (k, vs)) + fun insert (kToVs : multimap, k : key, v : item) : multimap = + inserts (kToVs, k, ValSet.singleton v) + fun find (kToVs : multimap, k : key) = + case KeyMap.find (kToVs, k) of + SOME vs => vs + | NONE => ValSet.empty +end diff --git a/src/sql.sig b/src/sql.sig new file mode 100644 index 00000000..540844c3 --- /dev/null +++ b/src/sql.sig @@ -0,0 +1,79 @@ +signature SQL = sig + +val fu : Mono.file -> unit + +val debug : bool ref + +type lvar + +datatype func = + DtCon0 of string + | DtCon1 of string + | UnCon of string + | Other of string + +datatype exp = + Const of Prim.t + | Var of int + 
| Lvar of lvar + | Func of func * exp list + | Recd of (string * exp) list + | Proj of exp * string + +datatype reln = + Known + | Sql of string + | PCon0 of string + | PCon1 of string + | Eq + | Ne + | Lt + | Le + | Gt + | Ge + +datatype prop = + True + | False + | Unknown + | And of prop * prop + | Or of prop * prop + | Reln of reln * exp list + | Cond of exp * prop + +datatype ('a, 'b) sum = inl of 'a | inr of 'b + +datatype Rel = + Exps of exp * exp -> prop + | Props of prop * prop -> prop + +datatype sqexp = + SqConst of Prim.t + | SqTrue + | SqFalse + | SqNot of sqexp + | Field of string * string + | Computed of string + | Binop of Rel * sqexp * sqexp + | SqKnown of sqexp + | Inj of Mono.exp + | SqFunc of string * sqexp + | Unmodeled + | Null + +datatype sitem = + SqField of string * string + | SqExp of sqexp * string + +type query1 + +datatype query = + Query1 of query1 + | Union of query * query + +datatype dml = + Insert of string * (string * sqexp) list + | Delete of string * sqexp + | Update of string * (string * sqexp) list * sqexp + +end diff --git a/src/sql.sml b/src/sql.sml index 601b3510..6ac8bc68 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -2,7 +2,7 @@ structure Sql = struct open Mono -val debug = ref true (*false*) +val debug = ref false type lvar = int @@ -425,81 +425,4 @@ datatype querydml = val querydml = log "querydml" (altL [wrap dml Dml, wrap query Query]) -(* New stuff. *) - -fun subExps' (exp' : Mono.exp') = - case exp' of - ECon (_,_,SOME exp) => [exp] - | ESome (_,exp) => [exp] - | EFfiApp (_,_,xs) => map #1 xs - | EApp (exp1,exp2) => [exp1, exp2] - | EAbs (_,_,_,exp) => [exp] - | EUnop (_,exp) => [exp] - | EBinop (_,_,exp1,exp2) => [exp1, exp2] - | ERecord xs => map #2 xs - | EField (exp,_) => [exp] - | ECase (exp,xs,_) => exp :: map #2 xs - | EStrcat (exp1,exp2) => [exp1,exp2] - | EError (exp,_) => [exp] - | EReturnBlob {blob=NONE, mimeType, ...} => [mimeType] - | EReturnBlob {blob=SOME exp, mimeType, ...} => [exp, mimeType] - | ERedirect (exp,_) => [exp] - | EWrite exp => [exp] - | ESeq (exp1,exp2) => [exp1, exp2] - | ELet (_,_,exp1,exp2) => [exp1, exp2] - | EClosure (_,xs) => xs - | EQuery {query, body, initial, ...} => [query, body, initial] - | EDml (exp,_) => [exp] - | ENextval exp => [exp] - | ESetval (exp1,exp2) => [exp1, exp2] - | EUnurlify (exp,_,_) => [exp] - | EJavaScript (_,exp) => [exp] - | ESignalReturn exp => [exp] - | ESignalBind (exp1,exp2) => [exp1, exp2] - | ESignalSource exp => [exp] - | EServerCall (exp,_,_,_) => [exp] - | ERecv (exp,_) => [exp] - | ESleep exp => [exp] - | ESpawn exp => [exp] - | _ => [] - -val subExps : Mono.exp -> Mono.exp list = subExps' o #1 - -fun println str = print (str ^ "\n") -fun printlnExp exp = (Print.print (MonoPrint.p_exp MonoEnv.empty exp); println "") - -fun tablesRead (Query1 {From=tablePairs, ...}) = map #1 tablePairs - | tablesRead (Union (q1,q2)) = tablesRead q1 @ tablesRead q2 - -fun tableTouched (Insert (tab,_)) = tab - | tableTouched (Delete (tab,_)) = tab - | tableTouched (Update (tab,_,_)) = tab - -fun goExp (exp : Mono.exp) = - case #1 exp of - EQuery {query=e, ...} => ( - case parse query e of - SOME q => println ("Query reads from " ^ String.concatWith ", " (tablesRead q)) - | NONE => println "Couldn't parse query"; - printlnExp exp; println "") - | EDml (e,_) => ( - case parse dml e of - SOME d => println ("DML touches " ^ tableTouched d) - | NONE => println "Couldn't parse DML"; - printlnExp exp; println "") - | ENextval _ => (printlnExp exp; println "") - | ESetval _ => (printlnExp exp; 
println "") - (* Recurse down the syntax tree. *) - | _ => app goExp (subExps exp) - -fun goDecl (decl : decl) = - case #1 decl of - DVal (_,_,_,exp,_) => goExp exp - | DValRec xs => app (goExp o #4) xs - | _ => () - -fun goFile (file : file) = app goDecl (#1 file) - -fun go file = (println "Doing SQL analysis.\n"; goFile file; ()) - end diff --git a/src/sql_cache.sml b/src/sql_cache.sml new file mode 100644 index 00000000..072eefb5 --- /dev/null +++ b/src/sql_cache.sml @@ -0,0 +1,179 @@ +structure SqlCache = struct + +open Sql +open Mono + +structure IS = IntBinarySet +structure IM = IntBinaryMap +structure StringKey = struct type ord_key = string val compare = String.compare end +structure SS = BinarySetFn (StringKey) +structure SM = BinaryMapFn (StringKey) +structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IS) + +val rec tablesRead = + fn Query1 {From=tablePairs, ...} => SS.fromList (map #1 tablePairs) + | Union (q1,q2) => SS.union (tablesRead q1, tablesRead q2) + +val tableWritten = + fn Insert (tab, _) => tab + | Delete (tab, _) => tab + | Update (tab, _, _) => tab + +fun tablesInExp' exp' = + let + val nothing = {read = SS.empty, written = SS.empty} + in + case exp' of + EQuery {query=e, ...} => + (case parse query e of + SOME q => {read = tablesRead q, written = SS.empty} + | NONE => nothing) + | EDml (e, _) => + (case parse dml e of + SOME q => {read = SS.empty, written = SS.singleton (tableWritten q)} + | NONE => nothing) + | _ => nothing + end + +val tablesInExp = + let + fun addTables (exp', {read, written}) = + let val {read = r, written = w} = tablesInExp' exp' + in {read = SS.union (r, read), written = SS.union (w, written)} end + in + MonoUtil.Exp.fold {typ = #2, exp = addTables} + {read = SS.empty, written = SS.empty} + end + +fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) +fun intTyp loc = (TFfi ("Basis", "int"), loc) +fun boolPat (b, loc) = (PCon (Enum, + PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, + con = if b then "True" else "False"}, + NONE), + loc) +fun boolTyp loc = (TFfi ("Basis", "int"), loc) + +fun ffiAppExp (module, func, arg, loc) = + (EFfiApp (module, func, [(intExp (arg, loc), intTyp loc)]), loc) + +fun sequence (befores, center, afters, loc) = + List.foldr (fn (exp, seq) => (ESeq (exp, seq), loc)) + (List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) + center + afters) + befores + +fun antiguardUnit (cond, exp, loc) = + (ECase (cond, + [(boolPat (false, loc), exp), + (boolPat (true, loc), (ERecord [], loc))], + {disc = boolTyp loc, result = (TRecord [], loc)}), + loc) + +fun underAbs f (exp as (exp', loc)) = + case exp' of + EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) + | _ => f exp + +fun addCacheCheck (index, exp) = + let + fun f (body as (_, loc)) = + let + val check = ffiAppExp ("Cache", "check", index, loc) + val store = ffiAppExp ("Cache", "store", index, loc) + in + antiguardUnit (check, sequence ([], body, [store], loc), loc) + end + in + underAbs f exp + end + +fun addCacheFlush (exp, tablesToIndices) = + let + fun addIndices (table, indices) = IS.union (indices, SIMM.find (tablesToIndices, table)) + fun f (body as (_, loc)) = + let + fun mapFfi func = List.map (fn i => ffiAppExp ("Cache", func, i, loc)) + val flushes = + IS.listItems (SS.foldr addIndices IS.empty (#written (tablesInExp body))) + + in + sequence (mapFfi "flush" flushes, body, mapFfi "ready" flushes, loc) + end + in + underAbs f exp + end + +val handlerIndices = + let + val isUnit = + fn (TRecord [], _) => true + | _ 
=> false + fun maybeAdd (d, soFar as {readers, writers}) = + case d of + DExport (Link ReadOnly, _, name, typs, typ, _) => + if List.all isUnit (typ::typs) + then {readers = IS.add (readers, name), writers = writers} + else soFar + | DExport (_, _, name, _, _, _) => (* Not read only. *) + {readers = readers, writers = IS.add (writers, name)} + | _ => soFar + in + MonoUtil.File.fold {typ = #2, exp = #2, decl = maybeAdd} + {readers = IS.empty, writers = IS.empty} + end + +fun fileFoldMapiSelected f init (file, indices) = + let + fun doExp (original as ((a, index, b, exp, c), state)) = + if IS.member (indices, index) + then let val (newExp, newState) = f (index, exp, state) + in ((a, index, b, newExp, c), newState) end + else original + fun doDecl decl state = + let + val result = + case decl of + DVal x => + let val (y, newState) = doExp (x, state) + in (DVal y, newState) end + | DValRec xs => + let val (ys, newState) = ListUtil.foldlMap doExp state xs + in (DValRec ys, newState) end + | _ => (decl, state) + in + Search.Continue result + end + fun nada x y = Search.Continue (x, y) + in + case MonoUtil.File.mapfold {typ = nada, exp = nada, decl = doDecl} file init of + Search.Continue x => x + | _ => (file, init) (* Should never happen. *) + end + +fun fileMapSelected f = #1 o fileFoldMapiSelected (fn (_, x, _) => (f x, ())) () + +val addCacheChecking = + let + fun f (index, exp, tablesToIndices) = + (addCacheCheck (index, exp), + SS.foldr (fn (table, tsToIs) => SIMM.insert (tsToIs, table, index)) + tablesToIndices + (#read (tablesInExp exp))) + in + fileFoldMapiSelected f (SM.empty) + end + +fun addCacheFlushing (file, tablesToIndices, writers) = + fileMapSelected (fn exp => addCacheFlush (exp, tablesToIndices)) (file, writers) + +fun go file = + let + val {readers, writers} = handlerIndices file + val (fileWithChecks, tablesToIndices) = addCacheChecking (file, readers) + in + addCacheFlushing (fileWithChecks, tablesToIndices, writers) + end + +end -- cgit v1.2.3 From b1516ed386ca303a526959586f0a06564ca77bb0 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 27 May 2014 21:14:13 -0400 Subject: Finishes initial prototype, caching parameterless pages with table-match-based invalidation. Still has problems parsing non-Postgres SQL dialects properly. 
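
For the table-match part: the pass builds a multimap from table names to the indices of the caches whose queries read that table, and a writing handler flushes exactly the indices mapped to the tables it writes. A small usage sketch of the MultimapFn functor this relies on (assumes multimap_fn.sml and the SML/NJ library are in scope; the table names are the SQL-level names of the test tables, and the indices are made up for illustration):

    structure SM = BinaryMapFn (struct type ord_key = string val compare = String.compare end)
    structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IntBinarySet)

    (* Cache 0 reads foo01; cache 1 reads both foo01 and foo10. *)
    val tablesToIndices =
        SIMM.insert (SIMM.insert (SIMM.insert (SM.empty, "uw_Test_foo01", 0),
                                  "uw_Test_foo01", 1),
                     "uw_Test_foo10", 1)

    (* An UPDATE on foo10 must therefore flush cache 1 only. *)
    val toFlush = IntBinarySet.listItems (SIMM.find (tablesToIndices, "uw_Test_foo10"))
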
--- caching-tests/test.db | Bin 0 -> 3072 bytes caching-tests/test.sql | 11 +++++ caching-tests/test.ur | 112 +++++++++++++++++----------------------------- caching-tests/test.urp | 3 +- include/urweb/urweb_cpp.h | 4 ++ src/c/urweb.c | 18 ++++++++ src/cjr_print.sml | 66 ++++++++++++++++++++++----- src/compiler.sml | 2 +- src/sources | 5 +++ src/sql.sig | 22 ++++++--- src/sql.sml | 2 +- src/sql_cache.sml | 11 ++++- 12 files changed, 165 insertions(+), 91 deletions(-) create mode 100644 caching-tests/test.db create mode 100644 caching-tests/test.sql (limited to 'src') diff --git a/caching-tests/test.db b/caching-tests/test.db new file mode 100644 index 00000000..190d2868 Binary files /dev/null and b/caching-tests/test.db differ diff --git a/caching-tests/test.sql b/caching-tests/test.sql new file mode 100644 index 00000000..862245b7 --- /dev/null +++ b/caching-tests/test.sql @@ -0,0 +1,11 @@ +CREATE TABLE uw_Test_foo01(uw_id integer NOT NULL, uw_bar text NOT NULL, + PRIMARY KEY (uw_id) + + ); + + CREATE TABLE uw_Test_foo10(uw_id integer NOT NULL, uw_bar text NOT NULL, + PRIMARY KEY (uw_id) + + ); + + \ No newline at end of file diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 4703e229..d13379a8 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -1,81 +1,53 @@ table foo01 : {Id : int, Bar : string} PRIMARY KEY Id table foo10 : {Id : int, Bar : string} PRIMARY KEY Id -(* val query = (SELECT * FROM foo WHERE foo.Bar = "baz") *) -(* val insert = (INSERT INTO foo (Id, Bar) VALUES (42, "baz")) *) +fun flush01 () : transaction page = + dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz01")); + dml (UPDATE foo01 SET Bar = "baz01" WHERE Id = 42); + return + Flushed 1! + -fun flush01 () : transaction page= - dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz")); - return - - - Flushed 1! - - +fun flush10 () : transaction page = + dml (UPDATE foo10 SET Bar = "baz10" WHERE Id = 42); + return + Flushed 2! + -fun flush10 () : transaction page= - dml (INSERT INTO foo10 (Id, Bar) VALUES (42, "baz")); - return - - - Flushed 2! - - - -fun flush11 () : transaction page= - dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz")); - dml (INSERT INTO foo10 (Id, Bar) VALUES (42, "baz")); - return - - - Flushed 1 and 2! - - +fun flush11 () : transaction page = + dml (UPDATE foo01 SET Bar = "baz11" WHERE Id = 42); + dml (UPDATE foo10 SET Bar = "baz11" WHERE Id = 42); + return + Flushed 1 and 2! + fun cache01 () : transaction page = - res <- oneOrNoRows (SELECT foo01.Id, foo01.Bar - FROM foo01 - WHERE foo01.Bar = "baz"); - return - - - Reading 1. - {case res of - None => - | Some row => {[row.Foo01.Bar]}} - - + res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); + return + Reading 1. + {case res of + None => + | Some row => {[row.Foo01.Bar]}} + fun cache10 () : transaction page = - res <- oneOrNoRows (SELECT foo10.Id, foo10.Bar - FROM foo10 - WHERE foo10.Bar = "baz"); - return - - - Reading 2. - {case res of - None => - | Some row => {[row.Foo10.Bar]}} - - + res <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); + return + Reading 2. + {case res of + None => + | Some row => {[row.Foo10.Bar]}} + fun cache11 () : transaction page = - res <- oneOrNoRows (SELECT foo01.Id, foo01.Bar - FROM foo01 - WHERE foo01.Bar = "baz"); - bla <- oneOrNoRows (SELECT foo10.Id, foo10.Bar - FROM foo10 - WHERE foo10.Bar = "baz"); - return - - - Reading 1 and 2. 
- {case res of - None => - | Some row => {[row.Foo01.Bar]}} - {case bla of - None => - | Some row => {[row.Foo10.Bar]}} - - + res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); + bla <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); + return + Reading 1 and 2. + {case res of + None => + | Some row => {[row.Foo01.Bar]}} + {case bla of + None => + | Some row => {[row.Foo10.Bar]}} + diff --git a/caching-tests/test.urp b/caching-tests/test.urp index e5111220..123f58e5 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -1,4 +1,5 @@ -database dbname=test +database test.db +sql test.sql safeGet Test/flush01 safeGet Test/flush10 safeGet Test/flush11 diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index 1bb6b2f2..799d0861 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -75,6 +75,10 @@ int uw_next_entry(struct uw_context *); void uw_write(struct uw_context *, const char*); +// For caching. +void uw_recordingStart(struct uw_context *); +char *uw_recordingRead(struct uw_context *); + uw_Basis_source uw_Basis_new_client_source(struct uw_context *, uw_Basis_string); uw_unit uw_Basis_set_client_source(struct uw_context *, uw_Basis_source, uw_Basis_string); diff --git a/src/c/urweb.c b/src/c/urweb.c index ffcc0146..d4c0b439 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -477,6 +477,9 @@ struct uw_context { char *output_buffer; size_t output_buffer_size; + + // For caching. + char *recording; }; size_t uw_headers_max = SIZE_MAX; @@ -560,6 +563,8 @@ uw_context uw_init(int id, void *logger_data, uw_logger log_debug) { ctx->output_buffer = malloc(1); ctx->output_buffer_size = 1; + ctx->recording = 0; + return ctx; } @@ -1636,6 +1641,19 @@ void uw_write(uw_context ctx, const char* s) { *ctx->page.front = 0; } +void uw_recordingStart(uw_context ctx) { + // TODO: remove following debug statement. + uw_write(ctx, ""); + ctx->recording = ctx->page.front; +} + +char *uw_recordingRead(uw_context ctx) { + char *recording = strdup(ctx->recording); + // TODO: remove following debug statement. + uw_write(ctx, ""); + return recording; +} + char *uw_Basis_attrifyInt(uw_context ctx, uw_Basis_int n) { char *result; int len; diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 05dce35e..ecd29f71 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -734,7 +734,7 @@ fun unurlify fromClient env (t, loc) = string (Int.toString (size has_arg)), string ", ((*request)[0] == '/' ? ++*request : NULL), ", newline, - + if unboxable then unurlify' "(*request)" (#1 t) else @@ -914,7 +914,7 @@ fun unurlify fromClient env (t, loc) = space, string "4, ((*request)[0] == '/' ? 
++*request : NULL), ", newline, - + string "({", newline, p_typ env (t, loc), @@ -1188,7 +1188,7 @@ fun urlify env t = string "(ctx,", space, string "it", - string (Int.toString level), + string (Int.toString level), string ");", newline] else @@ -1388,7 +1388,7 @@ fun urlify env t = string (Int.toString level), string ");", newline]) - + | _ => (ErrorMsg.errorAt loc "Unable to choose a URL encoding function"; space) in @@ -1578,7 +1578,7 @@ and p_exp' par tail env (e, loc) = newline], string "tmp;", newline, - string "})"] + string "})"] end | ENone _ => string "NULL" | ESome (t, e) => @@ -2078,7 +2078,7 @@ and p_exp' par tail env (e, loc) = space, p_exp' false false (E.pushERel (E.pushERel env "r" (TRecord rnum, loc)) - "acc" state) + "acc" state) body, string ";", newline] @@ -2102,7 +2102,7 @@ and p_exp' par tail env (e, loc) = newline, string "uw_ensure_transaction(ctx);", newline, - + case prepared of NONE => box [string "char *query = ", @@ -2187,7 +2187,7 @@ and p_exp' par tail env (e, loc) = string "uw_ensure_transaction(ctx);", newline, newline, - + #dmlPrepared (Settings.currentDbms ()) {loc = loc, id = id, dml = dml', @@ -3378,6 +3378,50 @@ fun p_file env (ds, ps) = newline, newline, + (* For caching. *) + box (List.map + (fn index => + let val i = Int.toString index + in box [string "static char *cache", + string i, + string " = NULL;", + newline, + string "static uw_Basis_bool uw_Cache_check", + string i, + string "(uw_context ctx) { puts(\"Checked ", + string i, + string "\"); if (cache", + string i, + string " == NULL) { uw_recordingStart(ctx); return uw_Basis_False; } else { uw_write(ctx, cache", + string i, + string "); return uw_Basis_True; } };", + newline, + string "static uw_unit uw_Cache_store", + string i, + string "(uw_context ctx) { cache", + string i, + string " = uw_recordingRead(ctx); puts(\"Stored ", + string i, + string "\"); return uw_unit_v; };", + newline, + string "static uw_unit uw_Cache_flush", + string i, + string "(uw_context ctx) { free(cache", + string i, + string "); cache", + string i, + string " = NULL; puts(\"Flushed ", + string i, + string "\"); return uw_unit_v; };", + newline, + string "static uw_unit uw_Cache_ready", + string i, + string "(uw_context ctx) { return uw_unit_v; };", + newline, + newline] + end) + (!SqlCache.ffiIndices)), + newline, p_list_sep newline (fn x => x) pds, newline, @@ -3433,7 +3477,7 @@ fun p_file env (ds, ps) = makeChecker ("uw_check_envVar", Settings.getEnvVarRules ()), newline, - + string "extern void uw_sign(const char *in, char *out);", newline, string "extern int uw_hash_blocksize;", @@ -3480,7 +3524,7 @@ fun p_file env (ds, ps) = newline, string ("uw_write_header(ctx, \"Last-modified: " ^ Date.fmt rfcFmt nowD ^ "\\r\\n\");"), newline, - string ("uw_write_header(ctx, \"Cache-Control: max-age=31536000, public\\r\\n\");"), + string ("uw_write_header(ctx, \"Cache-Control: max-age=31536000, public\\r\\n\");"), newline, string "uw_write(ctx, jslib);", newline, diff --git a/src/compiler.sml b/src/compiler.sml index de10d8c8..37272758 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -606,7 +606,7 @@ fun parseUrp' accLibs fname = filterEnv = rev (!env), sources = sources, protocol = !protocol, - dbms = !dbms, + dbms = (*!dbms*) SOME "sqlite", sigFile = !sigFile, safeGets = rev (!safeGets), onError = !onError, diff --git a/src/sources b/src/sources index f75803a3..b468c9a5 100644 --- a/src/sources +++ b/src/sources @@ -186,8 +186,13 @@ $(SRC)/mono_shake.sml $(SRC)/fuse.sig $(SRC)/fuse.sml +$(SRC)/sql.sig 
$(SRC)/sql.sml +$(SRC)/multimap_fn.sml + +$(SRC)/sql_cache.sml + $(SRC)/iflow.sig $(SRC)/iflow.sml diff --git a/src/sql.sig b/src/sql.sig index 540844c3..573a8baf 100644 --- a/src/sql.sig +++ b/src/sql.sig @@ -1,10 +1,8 @@ signature SQL = sig -val fu : Mono.file -> unit - val debug : bool ref -type lvar +type lvar = int datatype func = DtCon0 of string @@ -41,7 +39,13 @@ datatype prop = | Reln of reln * exp list | Cond of exp * prop -datatype ('a, 'b) sum = inl of 'a | inr of 'b +datatype chunk = + String of string + | Exp of Mono.exp + +type 'a parser = chunk list -> ('a * chunk list) option + +val parse : 'a parser -> Mono.exp -> 'a option datatype Rel = Exps of exp * exp -> prop @@ -61,19 +65,27 @@ datatype sqexp = | Unmodeled | Null +datatype ('a,'b) sum = inl of 'a | inr of 'b + datatype sitem = SqField of string * string | SqExp of sqexp * string -type query1 +type query1 = {Select : sitem list, + From : (string * string) list, + Where : sqexp option} datatype query = Query1 of query1 | Union of query * query +val query : query parser + datatype dml = Insert of string * (string * sqexp) list | Delete of string * sqexp | Update of string * (string * sqexp) list * sqexp +val dml : dml parser + end diff --git a/src/sql.sml b/src/sql.sml index 6ac8bc68..8642c9d2 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -1,4 +1,4 @@ -structure Sql = struct +structure Sql :> SQL = struct open Mono diff --git a/src/sql_cache.sml b/src/sql_cache.sml index 072eefb5..7f9d98d0 100644 --- a/src/sql_cache.sml +++ b/src/sql_cache.sml @@ -10,6 +10,10 @@ structure SS = BinarySetFn (StringKey) structure SM = BinaryMapFn (StringKey) structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IS) +val ffiIndices : int list ref = ref [] +val rs : int list ref = ref [] +val ws : int list ref = ref [] + val rec tablesRead = fn Query1 {From=tablePairs, ...} => SS.fromList (map #1 tablePairs) | Union (q1,q2) => SS.union (tablesRead q1, tablesRead q2) @@ -54,8 +58,8 @@ fun boolPat (b, loc) = (PCon (Enum, loc) fun boolTyp loc = (TFfi ("Basis", "int"), loc) -fun ffiAppExp (module, func, arg, loc) = - (EFfiApp (module, func, [(intExp (arg, loc), intTyp loc)]), loc) +fun ffiAppExp (module, func, index, loc) = + (EFfiApp (module, func ^ Int.toString index, []), loc) fun sequence (befores, center, afters, loc) = List.foldr (fn (exp, seq) => (ESeq (exp, seq), loc)) @@ -173,6 +177,9 @@ fun go file = val {readers, writers} = handlerIndices file val (fileWithChecks, tablesToIndices) = addCacheChecking (file, readers) in + rs := IS.listItems readers; + ws := IS.listItems writers; + ffiIndices := IS.listItems readers; addCacheFlushing (fileWithChecks, tablesToIndices, writers) end -- cgit v1.2.3 From 8766bd82ba931a28976319fe5bd76dc97d1218d6 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Fri, 30 May 2014 12:00:00 -0400 Subject: Removes DBMS choice override from compiler.sml (which was there for debugging). 
--- src/compiler.sml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/compiler.sml b/src/compiler.sml index fd143485..cbc6478d 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -606,7 +606,7 @@ fun parseUrp' accLibs fname = filterEnv = rev (!env), sources = sources, protocol = !protocol, - dbms = (*!dbms*) SOME "sqlite", + dbms = !dbms, sigFile = !sigFile, safeGets = rev (!safeGets), onError = !onError, -- cgit v1.2.3 From 171e5ecea687a43033e92c98c0661cc161d50e4a Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sat, 31 May 2014 03:08:16 -0400 Subject: Cleans up interface (it's now a command line option) and renames project to "sqlcache" in the all-one-word style. Still has issues to do with concurrency, retrying transactions, and foreign function calls that either rely on state or have side effects. --- caching-tests/test.ur | 1 - src/c/urweb.c | 7 +- src/cjr_print.sml | 14 ++-- src/compiler.sig | 5 +- src/compiler.sml | 9 +-- src/main.mlton.sml | 12 ++-- src/sources | 9 +-- src/sql.sml | 20 +++--- src/sql_cache.sml | 186 -------------------------------------------------- src/sqlcache.sig | 6 ++ src/sqlcache.sml | 182 ++++++++++++++++++++++++++++++++++++++++++++++++ 11 files changed, 227 insertions(+), 224 deletions(-) delete mode 100644 src/sql_cache.sml create mode 100644 src/sqlcache.sig create mode 100644 src/sqlcache.sml (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index d13379a8..a99a387b 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -2,7 +2,6 @@ table foo01 : {Id : int, Bar : string} PRIMARY KEY Id table foo10 : {Id : int, Bar : string} PRIMARY KEY Id fun flush01 () : transaction page = - dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz01")); dml (UPDATE foo01 SET Bar = "baz01" WHERE Id = 42); return Flushed 1! diff --git a/src/c/urweb.c b/src/c/urweb.c index 10bbf930..57762da8 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -1667,16 +1667,11 @@ void uw_write(uw_context ctx, const char* s) { } void uw_recordingStart(uw_context ctx) { - // TODO: remove following debug statement. - uw_write(ctx, ""); ctx->recording = ctx->page.front; } char *uw_recordingRead(uw_context ctx) { - char *recording = strdup(ctx->recording); - // TODO: remove following debug statement. 
- uw_write(ctx, ""); - return recording; + return strdup(ctx->recording); } char *uw_Basis_attrifyInt(uw_context ctx, uw_Basis_int n) { diff --git a/src/cjr_print.sml b/src/cjr_print.sml index ecd29f71..af2340fe 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3388,9 +3388,9 @@ fun p_file env (ds, ps) = newline, string "static uw_Basis_bool uw_Cache_check", string i, - string "(uw_context ctx) { puts(\"Checked ", + string "(uw_context ctx) { puts(\"Checked cache ", string i, - string "\"); if (cache", + string ".\"); if (cache", string i, string " == NULL) { uw_recordingStart(ctx); return uw_Basis_False; } else { uw_write(ctx, cache", string i, @@ -3400,9 +3400,9 @@ fun p_file env (ds, ps) = string i, string "(uw_context ctx) { cache", string i, - string " = uw_recordingRead(ctx); puts(\"Stored ", + string " = uw_recordingRead(ctx); puts(\"Stored cache ", string i, - string "\"); return uw_unit_v; };", + string ".\"); return uw_unit_v; };", newline, string "static uw_unit uw_Cache_flush", string i, @@ -3410,9 +3410,9 @@ fun p_file env (ds, ps) = string i, string "); cache", string i, - string " = NULL; puts(\"Flushed ", + string " = NULL; puts(\"Flushed cache ", string i, - string "\"); return uw_unit_v; };", + string ".\"); return uw_unit_v; };", newline, string "static uw_unit uw_Cache_ready", string i, @@ -3420,7 +3420,7 @@ fun p_file env (ds, ps) = newline, newline] end) - (!SqlCache.ffiIndices)), + (!Sqlcache.ffiIndices)), newline, p_list_sep newline (fn x => x) pds, diff --git a/src/compiler.sig b/src/compiler.sig index a0a653a7..81d92694 100644 --- a/src/compiler.sig +++ b/src/compiler.sig @@ -122,7 +122,7 @@ signature COMPILER = sig val pathcheck : (Mono.file, Mono.file) phase val sidecheck : (Mono.file, Mono.file) phase val sigcheck : (Mono.file, Mono.file) phase - val sqlCache : (Mono.file, Mono.file) phase + val sqlcache : (Mono.file, Mono.file) phase val cjrize : (Mono.file, Cjr.file) phase val prepare : (Cjr.file, Cjr.file) phase val checknest : (Cjr.file, Cjr.file) phase @@ -187,7 +187,7 @@ signature COMPILER = sig val toPathcheck : (string, Mono.file) transform val toSidecheck : (string, Mono.file) transform val toSigcheck : (string, Mono.file) transform - val toSqlCache : (string, Mono.file) transform + val toSqlcache : (string, Mono.file) transform val toCjrize : (string, Cjr.file) transform val toPrepare : (string, Cjr.file) transform val toChecknest : (string, Cjr.file) transform @@ -198,6 +198,7 @@ signature COMPILER = sig val enableBoot : unit -> unit val doIflow : bool ref + val doSqlcache : bool ref val addPath : string * string -> unit val addModuleRoot : string * string -> unit diff --git a/src/compiler.sml b/src/compiler.sml index cbc6478d..26e07e2a 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -83,6 +83,7 @@ type ('src, 'dst) transform = { val debug = ref false val dumpSource = ref false val doIflow = ref false +val doSqlcache = ref false val doDumpSource = ref (fn () => ()) @@ -1439,19 +1440,19 @@ val sigcheck = { val toSigcheck = transform sigcheck "sigcheck" o toSidecheck -val sqlCache = { - func = SqlCache.go, +val sqlcache = { + func = (fn file => (if !doSqlcache then Sqlcache.go file else file)), print = MonoPrint.p_file MonoEnv.empty } -val toSqlCache = transform sqlCache "sqlCache" o toSigcheck +val toSqlcache = transform sqlcache "sqlcache" o toSigcheck val cjrize = { func = Cjrize.cjrize, print = CjrPrint.p_file CjrEnv.empty } -val toCjrize = transform cjrize "cjrize" o toSqlCache +val toCjrize = transform cjrize "cjrize" o 
toSqlcache val prepare = { func = Prepare.prepare, diff --git a/src/main.mlton.sml b/src/main.mlton.sml index bfc18e59..5ecd7290 100644 --- a/src/main.mlton.sml +++ b/src/main.mlton.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -47,6 +47,7 @@ fun oneRun args = Elaborate.unifyMore := false; Compiler.dumpSource := false; Compiler.doIflow := false; + Compiler.doSqlcache := false; Demo.noEmacs := false; Settings.setDebug false) @@ -64,7 +65,7 @@ fun oneRun args = fun doArgs args = case args of [] => () - | "-version" :: rest => + | "-version" :: rest => printVersion () | "-numeric-version" :: rest => printNumericVersion () @@ -159,6 +160,9 @@ fun oneRun args = | "-iflow" :: rest => (Compiler.doIflow := true; doArgs rest) + | "-sqlcache" :: rest => + (Compiler.doSqlcache := true; + doArgs rest) | "-moduleOf" :: fname :: _ => (print (Compiler.moduleOf fname ^ "\n"); raise Code OS.Process.success) @@ -306,7 +310,7 @@ val () = case CommandLine.arguments () of (* Redirect the daemon's output to the socket. *) redirect Posix.FileSys.stdout; redirect Posix.FileSys.stderr; - + loop' ("", []); Socket.close sock; @@ -325,7 +329,7 @@ val () = case CommandLine.arguments () of loop () end) | ["daemon", "stop"] => - (OS.FileSys.remove socket handle OS.SysErr _ => OS.Process.exit OS.Process.success) + (OS.FileSys.remove socket handle OS.SysErr _ => OS.Process.exit OS.Process.success) | args => let val sock = UnixSock.Strm.socket () diff --git a/src/sources b/src/sources index b468c9a5..a87678f9 100644 --- a/src/sources +++ b/src/sources @@ -189,10 +189,6 @@ $(SRC)/fuse.sml $(SRC)/sql.sig $(SRC)/sql.sml -$(SRC)/multimap_fn.sml - -$(SRC)/sql_cache.sml - $(SRC)/iflow.sig $(SRC)/iflow.sml @@ -211,6 +207,11 @@ $(SRC)/sidecheck.sml $(SRC)/sigcheck.sig $(SRC)/sigcheck.sml +$(SRC)/multimap_fn.sml + +$(SRC)/sqlcache.sig +$(SRC)/sqlcache.sml + $(SRC)/cjr.sml $(SRC)/postgres.sig diff --git a/src/sql.sml b/src/sql.sml index 8642c9d2..11df715c 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -177,10 +177,10 @@ val uw_ident = wrapP ident (fn s => if String.isPrefix "uw_" s andalso size s >= else NONE) -val field = wrap (follow t_ident - (follow (const ".") - uw_ident)) - (fn (t, ((), f)) => (t, f)) +val field = wrap (follow (opt (follow t_ident (const "."))) + uw_ident) + (fn (SOME (t, ()), f) => (t, f) + | (NONE, f) => ("T", f)) (* Should probably deal with this MySQL/SQLite case better some day. 
*) datatype Rel = Exps of exp * exp -> prop @@ -396,22 +396,22 @@ val insert = log "insert" val delete = log "delete" (wrap (follow (const "DELETE FROM ") (follow uw_ident - (follow (const " AS T_T WHERE ") + (follow (follow (opt (const " AS T_T")) (const " WHERE ")) sqexp))) - (fn ((), (tab, ((), es))) => (tab, es))) + (fn ((), (tab, (_, es))) => (tab, es))) val setting = log "setting" - (wrap (follow uw_ident (follow (const " = ") sqexp)) - (fn (f, ((), e)) => (f, e))) + (wrap (follow uw_ident (follow (const " = ") sqexp)) + (fn (f, ((), e)) => (f, e))) val update = log "update" (wrap (follow (const "UPDATE ") (follow uw_ident - (follow (const " AS T_T SET ") + (follow (follow (opt (const " AS T_T")) (const " SET ")) (follow (list setting) (follow (ws (const "WHERE ")) sqexp))))) - (fn ((), (tab, ((), (fs, ((), e))))) => + (fn ((), (tab, (_, (fs, ((), e))))) => (tab, fs, e))) val dml = log "dml" diff --git a/src/sql_cache.sml b/src/sql_cache.sml deleted file mode 100644 index 7f9d98d0..00000000 --- a/src/sql_cache.sml +++ /dev/null @@ -1,186 +0,0 @@ -structure SqlCache = struct - -open Sql -open Mono - -structure IS = IntBinarySet -structure IM = IntBinaryMap -structure StringKey = struct type ord_key = string val compare = String.compare end -structure SS = BinarySetFn (StringKey) -structure SM = BinaryMapFn (StringKey) -structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IS) - -val ffiIndices : int list ref = ref [] -val rs : int list ref = ref [] -val ws : int list ref = ref [] - -val rec tablesRead = - fn Query1 {From=tablePairs, ...} => SS.fromList (map #1 tablePairs) - | Union (q1,q2) => SS.union (tablesRead q1, tablesRead q2) - -val tableWritten = - fn Insert (tab, _) => tab - | Delete (tab, _) => tab - | Update (tab, _, _) => tab - -fun tablesInExp' exp' = - let - val nothing = {read = SS.empty, written = SS.empty} - in - case exp' of - EQuery {query=e, ...} => - (case parse query e of - SOME q => {read = tablesRead q, written = SS.empty} - | NONE => nothing) - | EDml (e, _) => - (case parse dml e of - SOME q => {read = SS.empty, written = SS.singleton (tableWritten q)} - | NONE => nothing) - | _ => nothing - end - -val tablesInExp = - let - fun addTables (exp', {read, written}) = - let val {read = r, written = w} = tablesInExp' exp' - in {read = SS.union (r, read), written = SS.union (w, written)} end - in - MonoUtil.Exp.fold {typ = #2, exp = addTables} - {read = SS.empty, written = SS.empty} - end - -fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) -fun intTyp loc = (TFfi ("Basis", "int"), loc) -fun boolPat (b, loc) = (PCon (Enum, - PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, - con = if b then "True" else "False"}, - NONE), - loc) -fun boolTyp loc = (TFfi ("Basis", "int"), loc) - -fun ffiAppExp (module, func, index, loc) = - (EFfiApp (module, func ^ Int.toString index, []), loc) - -fun sequence (befores, center, afters, loc) = - List.foldr (fn (exp, seq) => (ESeq (exp, seq), loc)) - (List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) - center - afters) - befores - -fun antiguardUnit (cond, exp, loc) = - (ECase (cond, - [(boolPat (false, loc), exp), - (boolPat (true, loc), (ERecord [], loc))], - {disc = boolTyp loc, result = (TRecord [], loc)}), - loc) - -fun underAbs f (exp as (exp', loc)) = - case exp' of - EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) - | _ => f exp - -fun addCacheCheck (index, exp) = - let - fun f (body as (_, loc)) = - let - val check = ffiAppExp ("Cache", "check", index, loc) - val store = 
ffiAppExp ("Cache", "store", index, loc) - in - antiguardUnit (check, sequence ([], body, [store], loc), loc) - end - in - underAbs f exp - end - -fun addCacheFlush (exp, tablesToIndices) = - let - fun addIndices (table, indices) = IS.union (indices, SIMM.find (tablesToIndices, table)) - fun f (body as (_, loc)) = - let - fun mapFfi func = List.map (fn i => ffiAppExp ("Cache", func, i, loc)) - val flushes = - IS.listItems (SS.foldr addIndices IS.empty (#written (tablesInExp body))) - - in - sequence (mapFfi "flush" flushes, body, mapFfi "ready" flushes, loc) - end - in - underAbs f exp - end - -val handlerIndices = - let - val isUnit = - fn (TRecord [], _) => true - | _ => false - fun maybeAdd (d, soFar as {readers, writers}) = - case d of - DExport (Link ReadOnly, _, name, typs, typ, _) => - if List.all isUnit (typ::typs) - then {readers = IS.add (readers, name), writers = writers} - else soFar - | DExport (_, _, name, _, _, _) => (* Not read only. *) - {readers = readers, writers = IS.add (writers, name)} - | _ => soFar - in - MonoUtil.File.fold {typ = #2, exp = #2, decl = maybeAdd} - {readers = IS.empty, writers = IS.empty} - end - -fun fileFoldMapiSelected f init (file, indices) = - let - fun doExp (original as ((a, index, b, exp, c), state)) = - if IS.member (indices, index) - then let val (newExp, newState) = f (index, exp, state) - in ((a, index, b, newExp, c), newState) end - else original - fun doDecl decl state = - let - val result = - case decl of - DVal x => - let val (y, newState) = doExp (x, state) - in (DVal y, newState) end - | DValRec xs => - let val (ys, newState) = ListUtil.foldlMap doExp state xs - in (DValRec ys, newState) end - | _ => (decl, state) - in - Search.Continue result - end - fun nada x y = Search.Continue (x, y) - in - case MonoUtil.File.mapfold {typ = nada, exp = nada, decl = doDecl} file init of - Search.Continue x => x - | _ => (file, init) (* Should never happen. 
*) - end - -fun fileMapSelected f = #1 o fileFoldMapiSelected (fn (_, x, _) => (f x, ())) () - -val addCacheChecking = - let - fun f (index, exp, tablesToIndices) = - (addCacheCheck (index, exp), - SS.foldr (fn (table, tsToIs) => SIMM.insert (tsToIs, table, index)) - tablesToIndices - (#read (tablesInExp exp))) - in - fileFoldMapiSelected f (SM.empty) - end - -fun addCacheFlushing (file, tablesToIndices, writers) = - fileMapSelected (fn exp => addCacheFlush (exp, tablesToIndices)) (file, writers) - -fun go file = - let - val {readers, writers} = handlerIndices file - val (fileWithChecks, tablesToIndices) = addCacheChecking (file, readers) - in - rs := IS.listItems readers; - ws := IS.listItems writers; - ffiIndices := IS.listItems readers; - addCacheFlushing (fileWithChecks, tablesToIndices, writers) - end - -end diff --git a/src/sqlcache.sig b/src/sqlcache.sig new file mode 100644 index 00000000..ccc1741a --- /dev/null +++ b/src/sqlcache.sig @@ -0,0 +1,6 @@ +signature SQLCACHE = sig + +val ffiIndices : int list ref +val go : Mono.file -> Mono.file + +end diff --git a/src/sqlcache.sml b/src/sqlcache.sml new file mode 100644 index 00000000..2e7f6e42 --- /dev/null +++ b/src/sqlcache.sml @@ -0,0 +1,182 @@ +structure Sqlcache :> SQLCACHE = struct + +open Sql +open Mono + +structure IS = IntBinarySet +structure IM = IntBinaryMap +structure StringKey = struct type ord_key = string val compare = String.compare end +structure SS = BinarySetFn (StringKey) +structure SM = BinaryMapFn (StringKey) +structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IS) + +val ffiIndices : int list ref = ref [] + +val rec tablesRead = + fn Query1 {From=tablePairs, ...} => SS.fromList (map #1 tablePairs) + | Union (q1,q2) => SS.union (tablesRead q1, tablesRead q2) + +val tableWritten = + fn Insert (tab, _) => tab + | Delete (tab, _) => tab + | Update (tab, _, _) => tab + +fun tablesInExp' exp' = + let + val nothing = {read = SS.empty, written = SS.empty} + in + case exp' of + EQuery {query=e, ...} => + (case parse query e of + SOME q => {read = tablesRead q, written = SS.empty} + | NONE => nothing) + | EDml (e, _) => + (case parse dml e of + SOME q => {read = SS.empty, written = SS.singleton (tableWritten q)} + | NONE => nothing) + | _ => nothing + end + +val tablesInExp = + let + fun addTables (exp', {read, written}) = + let val {read = r, written = w} = tablesInExp' exp' + in {read = SS.union (r, read), written = SS.union (w, written)} end + in + MonoUtil.Exp.fold {typ = #2, exp = addTables} + {read = SS.empty, written = SS.empty} + end + +fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) +fun intTyp loc = (TFfi ("Basis", "int"), loc) +fun boolPat (b, loc) = (PCon (Enum, + PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, + con = if b then "True" else "False"}, + NONE), + loc) +fun boolTyp loc = (TFfi ("Basis", "int"), loc) + +fun ffiAppExp (module, func, index, loc) = + (EFfiApp (module, func ^ Int.toString index, []), loc) + +fun sequence (befores, center, afters, loc) = + List.foldr (fn (exp, seq) => (ESeq (exp, seq), loc)) + (List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) + center + afters) + befores + +fun antiguardUnit (cond, exp, loc) = + (ECase (cond, + [(boolPat (false, loc), exp), + (boolPat (true, loc), (ERecord [], loc))], + {disc = boolTyp loc, result = (TRecord [], loc)}), + loc) + +fun underAbs f (exp as (exp', loc)) = + case exp' of + EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) + | _ => f exp + +fun addCacheCheck (index, exp) = + let + fun f 
(body as (_, loc)) = + let + val check = ffiAppExp ("Cache", "check", index, loc) + val store = ffiAppExp ("Cache", "store", index, loc) + in + antiguardUnit (check, sequence ([], body, [store], loc), loc) + end + in + underAbs f exp + end + +fun addCacheFlush (exp, tablesToIndices) = + let + fun addIndices (table, indices) = IS.union (indices, SIMM.find (tablesToIndices, table)) + fun f (body as (_, loc)) = + let + fun mapFfi func = List.map (fn i => ffiAppExp ("Cache", func, i, loc)) + val flushes = + IS.listItems (SS.foldr addIndices IS.empty (#written (tablesInExp body))) + + in + sequence (mapFfi "flush" flushes, body, mapFfi "ready" flushes, loc) + end + in + underAbs f exp + end + +val handlerIndices = + let + val isUnit = + fn (TRecord [], _) => true + | _ => false + fun maybeAdd (d, soFar as {readers, writers}) = + case d of + DExport (Link ReadOnly, _, name, typs, typ, _) => + if List.all isUnit (typ::typs) + then {readers = IS.add (readers, name), writers = writers} + else soFar + | DExport (_, _, name, _, _, _) => (* Not read only. *) + {readers = readers, writers = IS.add (writers, name)} + | _ => soFar + in + MonoUtil.File.fold {typ = #2, exp = #2, decl = maybeAdd} + {readers = IS.empty, writers = IS.empty} + end + +fun fileFoldMapiSelected f init (file, indices) = + let + fun doExp (original as ((a, index, b, exp, c), state)) = + if IS.member (indices, index) + then let val (newExp, newState) = f (index, exp, state) + in ((a, index, b, newExp, c), newState) end + else original + fun doDecl decl state = + let + val result = + case decl of + DVal x => + let val (y, newState) = doExp (x, state) + in (DVal y, newState) end + | DValRec xs => + let val (ys, newState) = ListUtil.foldlMap doExp state xs + in (DValRec ys, newState) end + | _ => (decl, state) + in + Search.Continue result + end + fun nada x y = Search.Continue (x, y) + in + case MonoUtil.File.mapfold {typ = nada, exp = nada, decl = doDecl} file init of + Search.Continue x => x + | _ => (file, init) (* Should never happen. *) + end + +fun fileMapSelected f = #1 o fileFoldMapiSelected (fn (_, x, _) => (f x, ())) () + +val addCacheChecking = + let + fun f (index, exp, tablesToIndices) = + (addCacheCheck (index, exp), + SS.foldr (fn (table, tsToIs) => SIMM.insert (tsToIs, table, index)) + tablesToIndices + (#read (tablesInExp exp))) + in + fileFoldMapiSelected f (SM.empty) + end + +fun addCacheFlushing (file, tablesToIndices, writers) = + fileMapSelected (fn exp => addCacheFlush (exp, tablesToIndices)) (file, writers) + +fun go file = + let + val {readers, writers} = handlerIndices file + val (fileWithChecks, tablesToIndices) = addCacheChecking (file, readers) + in + ffiIndices := IS.listItems readers; + addCacheFlushing (fileWithChecks, tablesToIndices, writers) + end + +end -- cgit v1.2.3 From 8cf3a275f25ffcbb97d623c4e988fdcc81ef5978 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 15 Sep 2014 20:01:16 -0400 Subject: Small cleanup. 
--- caching-tests/test.db | Bin 3072 -> 3072 bytes src/cjr_print.sml | 14 ++++++----- src/sql.sig | 6 +---- src/sqlcache.sml | 67 +++++++++++++++++++++++++------------------------- 4 files changed, 42 insertions(+), 45 deletions(-) (limited to 'src') diff --git a/caching-tests/test.db b/caching-tests/test.db index 190d2868..a5c91e8f 100644 Binary files a/caching-tests/test.db and b/caching-tests/test.db differ diff --git a/src/cjr_print.sml b/src/cjr_print.sml index b2e8d2a7..8ca35234 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3393,7 +3393,7 @@ fun p_file env (ds, ps) = newline, newline, - (* For caching. *) + (* For sqlcache. *) box (List.map (fn index => let val i = Int.toString index @@ -3403,19 +3403,21 @@ fun p_file env (ds, ps) = newline, string "static uw_Basis_bool uw_Cache_check", string i, - string "(uw_context ctx) { puts(\"Checked cache ", + string "(uw_context ctx) { puts(\"SQLCACHE: checked ", string i, string ".\"); if (cache", string i, string " == NULL) { uw_recordingStart(ctx); return uw_Basis_False; } else { uw_write(ctx, cache", string i, - string "); return uw_Basis_True; } };", + string "); puts(\"SQLCACHE: used ", + string i, + string ".\"); return uw_Basis_True; } };", newline, string "static uw_unit uw_Cache_store", string i, string "(uw_context ctx) { cache", string i, - string " = uw_recordingRead(ctx); puts(\"Stored cache ", + string " = uw_recordingRead(ctx); puts(\"SQLCACHE: stored ", string i, string ".\"); return uw_unit_v; };", newline, @@ -3425,7 +3427,7 @@ fun p_file env (ds, ps) = string i, string "); cache", string i, - string " = NULL; puts(\"Flushed cache ", + string " = NULL; puts(\"SQLCACHE: flushed ", string i, string ".\"); return uw_unit_v; };", newline, @@ -3564,7 +3566,7 @@ fun p_file env (ds, ps) = newline, string ("uw_write_header(ctx, \"Content-Length: " ^ Int.toString (Word8Vector.length (#Bytes r)) ^ "\\r\\n\");"), newline, - string ("uw_write_header(ctx, \"Cache-Control: max-age=31536000, public\\r\\n\");"), + string ("uw_write_header(ctx, \"Cache-Control: max-age=31536000, public\\r\\n\");"), newline, string "uw_replace_page(ctx, \"", string (hexify (#Bytes r)), diff --git a/src/sql.sig b/src/sql.sig index 573a8baf..2623f5e7 100644 --- a/src/sql.sig +++ b/src/sql.sig @@ -39,11 +39,7 @@ datatype prop = | Reln of reln * exp list | Cond of exp * prop -datatype chunk = - String of string - | Exp of Mono.exp - -type 'a parser = chunk list -> ('a * chunk list) option +type 'a parser val parse : 'a parser -> Mono.exp -> 'a option diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 2e7f6e42..b01de4c9 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -12,6 +12,37 @@ structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IS) val ffiIndices : int list ref = ref [] +(* Expression construction utilities. 
*) + +fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) +fun intTyp loc = (TFfi ("Basis", "int"), loc) +fun boolPat (b, loc) = (PCon (Enum, + PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, + con = if b then "True" else "False"}, + NONE), + loc) +fun boolTyp loc = (TFfi ("Basis", "int"), loc) + +fun ffiAppExp (module, func, index, loc) = + (EFfiApp (module, func ^ Int.toString index, []), loc) + +fun sequence ((exp :: exps), loc) = + List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) exp exps + +fun antiguardUnit (cond, exp, loc) = + (ECase (cond, + [(boolPat (false, loc), exp), + (boolPat (true, loc), (ERecord [], loc))], + {disc = boolTyp loc, result = (TRecord [], loc)}), + loc) + +fun underAbs f (exp as (exp', loc)) = + case exp' of + EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) + | _ => f exp + +(* Program analysis and augmentation. *) + val rec tablesRead = fn Query1 {From=tablePairs, ...} => SS.fromList (map #1 tablePairs) | Union (q1,q2) => SS.union (tablesRead q1, tablesRead q2) @@ -47,37 +78,6 @@ val tablesInExp = {read = SS.empty, written = SS.empty} end -fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) -fun intTyp loc = (TFfi ("Basis", "int"), loc) -fun boolPat (b, loc) = (PCon (Enum, - PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, - con = if b then "True" else "False"}, - NONE), - loc) -fun boolTyp loc = (TFfi ("Basis", "int"), loc) - -fun ffiAppExp (module, func, index, loc) = - (EFfiApp (module, func ^ Int.toString index, []), loc) - -fun sequence (befores, center, afters, loc) = - List.foldr (fn (exp, seq) => (ESeq (exp, seq), loc)) - (List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) - center - afters) - befores - -fun antiguardUnit (cond, exp, loc) = - (ECase (cond, - [(boolPat (false, loc), exp), - (boolPat (true, loc), (ERecord [], loc))], - {disc = boolTyp loc, result = (TRecord [], loc)}), - loc) - -fun underAbs f (exp as (exp', loc)) = - case exp' of - EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) - | _ => f exp - fun addCacheCheck (index, exp) = let fun f (body as (_, loc)) = @@ -85,7 +85,7 @@ fun addCacheCheck (index, exp) = val check = ffiAppExp ("Cache", "check", index, loc) val store = ffiAppExp ("Cache", "store", index, loc) in - antiguardUnit (check, sequence ([], body, [store], loc), loc) + antiguardUnit (check, sequence ([body, store], loc), loc) end in underAbs f exp @@ -99,9 +99,8 @@ fun addCacheFlush (exp, tablesToIndices) = fun mapFfi func = List.map (fn i => ffiAppExp ("Cache", func, i, loc)) val flushes = IS.listItems (SS.foldr addIndices IS.empty (#written (tablesInExp body))) - in - sequence (mapFfi "flush" flushes, body, mapFfi "ready" flushes, loc) + sequence (mapFfi "flush" flushes @ [body] @ mapFfi "ready" flushes, loc) end in underAbs f exp -- cgit v1.2.3 From 75d1eedd15edc41b1c2bc9d1fce7a74f37bd78a1 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 14 Oct 2014 18:05:09 -0400 Subject: Complete overhaul: cache queries based on immediate query result, not eventual HTML output. 
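With this change, each query result is cached keyed on the values injected into the query text, and each DML statement flushes the cache slots of the queries that read the tables it writes. A rough standalone sketch of that bookkeeping (simplified: the real code in sqlcache.sml uses the SIMM multimap from table names to IntBinarySet of cache indices; noteRead, indicesToFlush, and the plain list values below are only illustrative):

    structure SM = BinaryMapFn(struct
                               type ord_key = string
                               val compare = String.compare
                               end)

    (* Record that the query cached at slot [index] reads [table]. *)
    fun noteRead (tabToIndices, table, index) =
        SM.insert (tabToIndices, table,
                   index :: getOpt (SM.find (tabToIndices, table), []))

    (* A DML statement that writes [table] must flush every slot recorded
       for it; DML we fail to parse conservatively flushes all slots. *)
    fun indicesToFlush (tabToIndices, table) =
        getOpt (SM.find (tabToIndices, table), [])

On the C side, each slot keeps the last query result together with the parameter strings it was computed from, so a cache check is just a strcmp of the incoming parameters against the stored ones.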
--- caching-tests/test.db | Bin 3072 -> 5120 bytes caching-tests/test.sql | 7 +- caching-tests/test.ur | 74 +++++++++----- caching-tests/test.urp | 1 + caching-tests/test.urs | 2 + src/cjr_print.sml | 70 +++++++++---- src/compiler.sig | 1 - src/compiler.sml | 6 +- src/monoize.sig | 2 +- src/monoize.sml | 24 +++-- src/multimap_fn.sml | 10 +- src/settings.sig | 3 + src/settings.sml | 4 + src/sources | 2 + src/sql.sig | 2 + src/sql.sml | 20 +++- src/sqlcache.sml | 266 +++++++++++++++++++++++++++++++++++++++++++++---- 17 files changed, 411 insertions(+), 83 deletions(-) (limited to 'src') diff --git a/caching-tests/test.db b/caching-tests/test.db index a5c91e8f..944aa851 100644 Binary files a/caching-tests/test.db and b/caching-tests/test.db differ diff --git a/caching-tests/test.sql b/caching-tests/test.sql index 862245b7..efa271ec 100644 --- a/caching-tests/test.sql +++ b/caching-tests/test.sql @@ -8,4 +8,9 @@ CREATE TABLE uw_Test_foo01(uw_id integer NOT NULL, uw_bar text NOT NULL, ); - \ No newline at end of file + CREATE TABLE uw_Test_tab(uw_id integer NOT NULL, uw_val integer NOT NULL, + PRIMARY KEY (uw_id) + + ); + + \ No newline at end of file diff --git a/caching-tests/test.ur b/caching-tests/test.ur index a99a387b..cb391da7 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -1,52 +1,74 @@ table foo01 : {Id : int, Bar : string} PRIMARY KEY Id table foo10 : {Id : int, Bar : string} PRIMARY KEY Id +table tab : {Id : int, Val : int} PRIMARY KEY Id -fun flush01 () : transaction page = - dml (UPDATE foo01 SET Bar = "baz01" WHERE Id = 42); - return - Flushed 1! - - -fun flush10 () : transaction page = - dml (UPDATE foo10 SET Bar = "baz10" WHERE Id = 42); - return - Flushed 2! - - -fun flush11 () : transaction page = - dml (UPDATE foo01 SET Bar = "baz11" WHERE Id = 42); - dml (UPDATE foo10 SET Bar = "baz11" WHERE Id = 42); - return - Flushed 1 and 2! - - -fun cache01 () : transaction page = +fun cache01 () = res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); return Reading 1. {case res of - None => + None => ? | Some row => {[row.Foo01.Bar]}} -fun cache10 () : transaction page = +fun cache10 () = res <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); return Reading 2. {case res of - None => + None => ? | Some row => {[row.Foo10.Bar]}} -fun cache11 () : transaction page = +fun cache11 () = res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); bla <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); return Reading 1 and 2. {case res of - None => + None => ? | Some row => {[row.Foo01.Bar]}} {case bla of - None => + None => ? | Some row => {[row.Foo10.Bar]}} + +fun flush01 () = + dml (UPDATE foo01 SET Bar = "baz01" WHERE Id = 42); + return + Flushed 1! + + +fun flush10 () = + dml (UPDATE foo10 SET Bar = "baz10" WHERE Id = 42); + return + Flushed 2! + + +fun flush11 () = + dml (UPDATE foo01 SET Bar = "baz11" WHERE Id = 42); + dml (UPDATE foo10 SET Bar = "baz11" WHERE Id = 42); + return + Flushed 1 and 2! + + +fun cache id = + res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[id]}); + return + Reading {[id]}. + {case res of + None => ? + | Some row => {[row.Tab.Val]}} + + +fun flush id = + res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[id]}); + dml (case res of + None => (INSERT INTO tab (Id, Val) VALUES ({[id]}, 0)) + | Some row => (UPDATE tab SET Val = {[row.Tab.Val + 1]} WHERE Id = {[id]})); + return + (* Flushed {[id]}! *) + {case res of + None => Initialized {[id]}! 
+ | Some row => Incremented {[id]}!} + diff --git a/caching-tests/test.urp b/caching-tests/test.urp index 123f58e5..7ac469f9 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -3,5 +3,6 @@ sql test.sql safeGet Test/flush01 safeGet Test/flush10 safeGet Test/flush11 +safeGet Test/flush test diff --git a/caching-tests/test.urs b/caching-tests/test.urs index ce7d0350..ace4ba28 100644 --- a/caching-tests/test.urs +++ b/caching-tests/test.urs @@ -4,3 +4,5 @@ val cache11 : unit -> transaction page val flush01 : unit -> transaction page val flush10 : unit -> transaction page val flush11 : unit -> transaction page +val cache : int -> transaction page +val flush : int -> transaction page diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 8ca35234..6427cf3d 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3395,49 +3395,77 @@ fun p_file env (ds, ps) = (* For sqlcache. *) box (List.map - (fn index => + (fn {index, params} => let val i = Int.toString index + fun paramRepeat itemi sep = + let + val rec f = + fn 0 => itemi (Int.toString 0) + | n => f (n-1) ^ itemi (Int.toString n) + in + f (params - 1) + end + val args = paramRepeat (fn p => "uw_Basis_string p" ^ p) ", " + val decls = paramRepeat (fn p => "uw_Basis_string param" ^ i ^ "_" ^ p ^ " = NULL;") "\n" + val sets = paramRepeat (fn p => "param" ^ i ^ "_" ^ p + ^ " = strdup(p" ^ p ^ ");") "\n" + val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") "\n" + val eqs = paramRepeat (fn p => "strcmp(param" ^ i ^ "_" ^ p + ^ ", p" ^ p ^ ")") " || " in box [string "static char *cache", string i, string " = NULL;", newline, - string "static uw_Basis_bool uw_Cache_check", - string i, - string "(uw_context ctx) { puts(\"SQLCACHE: checked ", + string decls, + newline, + string "static uw_Basis_string uw_Sqlcache_check", string i, - string ".\"); if (cache", + string "(uw_context ctx, ", + string args, + string ") {\n puts(\"SQLCACHE: checked ", string i, - string " == NULL) { uw_recordingStart(ctx); return uw_Basis_False; } else { uw_write(ctx, cache", + string ".\");\n if (cache", string i, - string "); puts(\"SQLCACHE: used ", + (* ASK: is returning the pointer okay? Should we duplicate? 
*) + string " == NULL || ", + string eqs, + string ") {\n puts(\"miss D:\"); puts(p0);\n return NULL;\n } else {\n puts(\"hit :D\");\n return cache", string i, - string ".\"); return uw_Basis_True; } };", + string ";\n } };", newline, - string "static uw_unit uw_Cache_store", + string "static uw_unit uw_Sqlcache_store", string i, - string "(uw_context ctx) { cache", + string "(uw_context ctx, uw_Basis_string s, ", + string args, + string ") {\n free(cache", string i, - string " = uw_recordingRead(ctx); puts(\"SQLCACHE: stored ", + string ");", + newline, + string frees, + newline, + string "cache", string i, - string ".\"); return uw_unit_v; };", + string " = strdup(s);", + newline, + string sets, newline, - string "static uw_unit uw_Cache_flush", + string "puts(\"SQLCACHE: stored ", string i, - string "(uw_context ctx) { free(cache", + string ".\"); puts(p0);\n return uw_unit_v;\n };", + newline, + string "static uw_unit uw_Sqlcache_flush", string i, - string "); cache", + string "(uw_context ctx) {\n free(cache", string i, - string " = NULL; puts(\"SQLCACHE: flushed ", + string ");\n cache", string i, - string ".\"); return uw_unit_v; };", - newline, - string "static uw_unit uw_Cache_ready", + string " = NULL;\n puts(\"SQLCACHE: flushed ", string i, - string "(uw_context ctx) { return uw_unit_v; };", + string ".\");\n return uw_unit_v;\n };", newline, newline] end) - (!Sqlcache.ffiIndices)), + (Sqlcache.getFfiInfo ())), newline, p_list_sep newline (fn x => x) pds, diff --git a/src/compiler.sig b/src/compiler.sig index fb0245ea..c154240a 100644 --- a/src/compiler.sig +++ b/src/compiler.sig @@ -199,7 +199,6 @@ signature COMPILER = sig val enableBoot : unit -> unit val doIflow : bool ref - val doSqlcache : bool ref val addPath : string * string -> unit val addModuleRoot : string * string -> unit diff --git a/src/compiler.sml b/src/compiler.sml index d7ee8700..fc4067a4 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -83,7 +83,6 @@ type ('src, 'dst) transform = { val debug = ref false val dumpSource = ref false val doIflow = ref false -val doSqlcache = ref false val doDumpSource = ref (fn () => ()) @@ -1457,7 +1456,10 @@ val sigcheck = { val toSigcheck = transform sigcheck "sigcheck" o toSidecheck val sqlcache = { - func = (fn file => (if !doSqlcache then Sqlcache.go file else file)), + func = (fn file => + if Settings.getSqlcache () + then let val file = MonoInline.inlineFull file in Sqlcache.go file end + else file), print = MonoPrint.p_file MonoEnv.empty } diff --git a/src/monoize.sig b/src/monoize.sig index 838d7c4c..951db01b 100644 --- a/src/monoize.sig +++ b/src/monoize.sig @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN diff --git a/src/monoize.sml b/src/monoize.sml index 6073a21f..d609a67d 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -1957,20 +1957,26 @@ fun monoExp (env, st, fm) (all as (e, loc)) = (L'.TFun (un, state), loc)), loc)), loc) - val body' = (L'.EApp ( + val body'' = (L'.EApp ( (L'.EApp ( (L'.EApp ((L'.ERel 4, loc), (L'.ERel 1, loc)), loc), (L'.ERel 0, loc)), loc), (L'.ERecord [], loc)), loc) - - val body = (L'.EQuery {exps = exps, - tables = tables, - state = state, - query = (L'.ERel 3, loc), - body = body', - initial = (L'.ERel 1, loc)}, - loc) + val body' = (L'.EQuery {exps = exps, + tables = tables, + state = state, + query = (L'.ERel 3, loc), + body = body'', + initial = (L'.ERel 1, loc)}, + loc) + val (body, fm) = if Settings.getSqlcache () then + let + val (urlifiedRel0, fm) = urlifyExp env fm ((L'.ERel 0, loc), state) + in + (Sqlcache.instrumentQuery (body', urlifiedRel0), fm) + end + else (body', fm) in ((L'.EAbs ("q", s, (L'.TFun (ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc)), loc), (L'.EAbs ("f", ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc), diff --git a/src/multimap_fn.sml b/src/multimap_fn.sml index 585b741f..3dab68a5 100644 --- a/src/multimap_fn.sml +++ b/src/multimap_fn.sml @@ -1,14 +1,16 @@ functor MultimapFn (structure KeyMap : ORD_MAP structure ValSet : ORD_SET) = struct type key = KeyMap.Key.ord_key type item = ValSet.item - type items = ValSet.set + type itemSet = ValSet.set type multimap = ValSet.set KeyMap.map - fun inserts (kToVs : multimap, k : key, vs : items) : multimap = + val empty : multimap = KeyMap.empty + fun insertSet (kToVs : multimap, k : key, vs : itemSet) : multimap = KeyMap.unionWith ValSet.union (kToVs, KeyMap.singleton (k, vs)) fun insert (kToVs : multimap, k : key, v : item) : multimap = - inserts (kToVs, k, ValSet.singleton v) - fun find (kToVs : multimap, k : key) = + insertSet (kToVs, k, ValSet.singleton v) + fun findSet (kToVs : multimap, k : key) = case KeyMap.find (kToVs, k) of SOME vs => vs | NONE => ValSet.empty + val findList : multimap * key -> item list = ValSet.listItems o findSet end diff --git a/src/settings.sig b/src/settings.sig index 9b32e502..e94832e0 100644 --- a/src/settings.sig +++ b/src/settings.sig @@ -279,6 +279,9 @@ signature SETTINGS = sig val setLessSafeFfi : bool -> unit val getLessSafeFfi : unit -> bool + val setSqlcache : bool -> unit + val getSqlcache : unit -> bool + val setFilePath : string -> unit (* Sets the directory where we look for files being added below. 
*) diff --git a/src/settings.sml b/src/settings.sml index eb350c95..81c33c08 100644 --- a/src/settings.sml +++ b/src/settings.sml @@ -744,6 +744,10 @@ val less = ref false fun setLessSafeFfi b = less := b fun getLessSafeFfi () = !less +val sqlcache = ref false +fun setSqlcache b = sqlcache := b +fun getSqlcache () = !sqlcache + structure SM = BinaryMapFn(struct type ord_key = string val compare = String.compare diff --git a/src/sources b/src/sources index 8860b310..518b7484 100644 --- a/src/sources +++ b/src/sources @@ -212,6 +212,8 @@ $(SRC)/multimap_fn.sml $(SRC)/sqlcache.sig $(SRC)/sqlcache.sml +$(SRC)/mono_inline.sml + $(SRC)/cjr.sml $(SRC)/postgres.sig diff --git a/src/sql.sig b/src/sql.sig index 2623f5e7..2aba8383 100644 --- a/src/sql.sig +++ b/src/sql.sig @@ -2,6 +2,8 @@ signature SQL = sig val debug : bool ref +val sqlcacheMode : bool ref + type lvar = int datatype func = diff --git a/src/sql.sml b/src/sql.sml index 8d245660..d38de055 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -270,6 +270,22 @@ fun sqlify chs = | _ => NONE +fun sqlifySqlcache chs = + case chs of + (* Match entire FFI application, not just its argument. *) + Exp (e' as EFfiApp ("Basis", f, [(_, _)]), _) :: chs => + if String.isPrefix "sqlify" f then + SOME ((e', ErrorMsg.dummySpan), chs) + else + NONE + | Exp (ECase (e, [((PCon (_, PConFfi {mod = "Basis", con = "True", ...}, NONE), _), + (EPrim (Prim.String (Prim.Normal, "TRUE")), _)), + ((PCon (_, PConFfi {mod = "Basis", con = "False", ...}, NONE), _), + (EPrim (Prim.String (Prim.Normal, "FALSE")), _))], _), _) :: chs => + SOME (e, chs) + + | _ => NONE + fun constK s = wrap (const s) (fn () => s) val funcName = altL [constK "COUNT", @@ -281,6 +297,8 @@ val funcName = altL [constK "COUNT", val unmodeled = altL [const "COUNT(*)", const "CURRENT_TIMESTAMP"] +val sqlcacheMode = ref false; + fun sqexp chs = log "sqexp" (altL [wrap prim SqConst, @@ -292,7 +310,7 @@ fun sqexp chs = wrap known SqKnown, wrap func SqFunc, wrap unmodeled (fn () => Unmodeled), - wrap sqlify Inj, + wrap (if !sqlcacheMode then sqlifySqlcache else sqlify) Inj, wrap (follow (const "COALESCE(") (follow sqexp (follow (const ",") (follow (keep (fn ch => ch <> #")")) (const ")"))))) (fn ((), (e, _)) => e), diff --git a/src/sqlcache.sml b/src/sqlcache.sml index b01de4c9..563b2162 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,21 +1,247 @@ -structure Sqlcache :> SQLCACHE = struct +structure Sqlcache (* :> SQLCACHE *) = struct open Sql open Mono structure IS = IntBinarySet structure IM = IntBinaryMap -structure StringKey = struct type ord_key = string val compare = String.compare end -structure SS = BinarySetFn (StringKey) -structure SM = BinaryMapFn (StringKey) -structure SIMM = MultimapFn (structure KeyMap = SM structure ValSet = IS) +structure SK = struct type ord_key = string val compare = String.compare end +structure SS = BinarySetFn(SK) +structure SM = BinaryMapFn(SK) +structure SIMM = MultimapFn(structure KeyMap = SM structure ValSet = IS) -val ffiIndices : int list ref = ref [] +(* Filled in by cacheWrap during Sqlcache. *) +val ffiInfo : {index : int, params : int} list ref = ref [] -(* Expression construction utilities. *) +fun getFfiInfo () = !ffiInfo + +(* Program analysis. 
*) + +val useInjIfPossible = + fn SqConst prim => Inj (EPrim (Prim.String (Prim.Normal, Prim.toString prim)), ErrorMsg.dummySpan) + | sqexp => sqexp + +fun equalities (canonicalTable : string -> string) : + sqexp -> ((string * string) * Mono.exp) list option = + let + val rec eqs = + fn Binop (Exps f, e1, e2) => + (* TODO: use a custom datatype in Exps instead of a function. *) + (case f (Var 1, Var 2) of + Reln (Eq, [Var 1, Var 2]) => + let + val (e1', e2') = (useInjIfPossible e1, useInjIfPossible e2) + in + case (e1', e2') of + (Field (t, f), Inj i) => SOME [((canonicalTable t, f), i)] + | (Inj i, Field (t, f)) => SOME [((canonicalTable t, f), i)] + | _ => NONE + end + | _ => NONE) + | Binop (Props f, e1, e2) => + (* TODO: use a custom datatype in Props instead of a function. *) + (case f (True, False) of + And (True, False) => + (case (eqs e1, eqs e2) of + (SOME eqs1, SOME eqs2) => SOME (eqs1 @ eqs2) + | _ => NONE) + | _ => NONE) + | _ => NONE + in + eqs + end + +val equalitiesQuery = + fn Query1 {From = tablePairs, Where = SOME exp, ...} => + equalities + (* If we have [SELECT ... FROM T AS T' ...], use T, not T'. *) + (fn t => + case List.find (fn (_, tAs) => t = tAs) tablePairs of + NONE => t + | SOME (tOrig, _) => tOrig) + exp + | Query1 {Where = NONE, ...} => SOME [] + | _ => NONE + +val equalitiesDml = + fn Insert (tab, eqs) => SOME (List.mapPartial + (fn (name, sqexp) => + case useInjIfPossible sqexp of + Inj e => SOME ((tab, name), e) + | _ => NONE) + eqs) + | Delete (tab, exp) => equalities (fn _ => tab) exp + (* TODO: examine the updated values and not just the way they're filtered. *) + (* For example, UPDATE foo SET Id = 9001 WHERE Id = 42 should update both the + Id = 42 and Id = 9001 cache entries. Could also think of it as doing a + Delete immediately followed by an Insert. *) + | Update (tab, _, exp) => equalities (fn _ => tab) exp + +val rec tablesQuery = + fn Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) + | Union (q1, q2) => SS.union (tablesQuery q1, tablesQuery q2) + +val tableDml = + fn Insert (tab, _) => tab + | Delete (tab, _) => tab + | Update (tab, _, _) => tab + + +(* Program instrumentation. *) + +val stringTyp = (TFfi ("Basis", "string"), ErrorMsg.dummySpan) + +val sequence = + fn (exp :: exps) => + let + val loc = ErrorMsg.dummySpan + in + List.foldl (fn (e', seq) => ESeq ((seq, loc), (e', loc))) exp exps + end + | _ => raise Match + +fun ffiAppCache' (func, index, args) : Mono.exp' = + EFfiApp ("Sqlcache", func ^ Int.toString index, args) + +fun ffiAppCache (func, index, args) : Mono. exp = + (ffiAppCache' (func, index, args), ErrorMsg.dummySpan) + +val varPrefix = "queryResult" + +fun indexOfName varName = + if String.isPrefix varPrefix varName + then Int.fromString (String.extract (varName, String.size varPrefix, NONE)) + else NONE + +val incRels = MonoUtil.Exp.map {typ = fn x => x, exp = fn ERel n => ERel (n + 1) | x => x} + +(* Filled in by instrumentQuery during Monoize, used during Sqlcache. *) +val urlifiedRel0s : Mono.exp IM.map ref = ref IM.empty + +(* Used by Monoize. *) +val instrumentQuery = + let + val nextQuery = ref 0 + fun iq (query, urlifiedRel0) = + case query of + (EQuery {state = typ, ...}, loc) => + let + val i = !nextQuery before nextQuery := !nextQuery + 1 + in + urlifiedRel0s := IM.insert (!urlifiedRel0s, i, urlifiedRel0); + (* ASK: name variables properly? 
*) + (ELet (varPrefix ^ Int.toString i, typ, query, + (* Uses a dummy FFI call to keep the urlified expression around, which + in turn keeps the declarations required for urlification safe from + MonoShake. The dummy call is removed during Sqlcache. *) + (* ASK: is there a better way? *) + (ESeq ((EFfiApp ("Sqlcache", "dummy", [(urlifiedRel0, stringTyp)]), loc), + (ERel 0, loc)), + loc)), + loc) + end + | _ => raise Match + in + iq + end + +val gunk : ((string * string) * Mono.exp) list list ref = ref [[]] + +fun cacheWrap (query, i, urlifiedRel0, eqs) = + case query of + (EQuery {state = typ, ...}, _) => + let + val loc = ErrorMsg.dummySpan + (* TODO: deal with effectful injected expressions. *) + val args = (ffiInfo := {index = i, params = length eqs} :: !ffiInfo; + map (fn (_, e) => (e, stringTyp)) eqs) before gunk := eqs :: !gunk + val argsInc = map (fn (e, t) => (incRels e, t)) args + in + (ECase (ffiAppCache ("check", i, args), + [((PNone stringTyp, loc), + (ELet ("q", typ, query, + (ESeq (ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argsInc), + (ERel 0, loc)), + loc)), + loc)), + ((PSome (stringTyp, (PVar ("hit", stringTyp), loc)), loc), + (* ASK: what does this bool do? *) + (EUnurlify ((ERel 0, loc), typ, false), loc))], + {disc = stringTyp, result = typ}), + loc) + end + | _ => raise Match + +fun fileMapfold doExp file start = + case MonoUtil.File.mapfold {typ = Search.return2, + exp = fn x => (fn s => Search.Continue (doExp x s)), + decl = Search.return2} file start of + Search.Continue x => x + | Search.Return _ => raise Match + +fun fileMap doExp file = #1 (fileMapfold (fn x => fn _ => (doExp x, ())) file ()) + +val addChecking = + let + fun doExp queryInfo = + fn e' as ELet (v, t, queryExp as (EQuery {query = queryText, ...}, _), body) => + let + fun bind x f = Option.mapPartial f x + val attempt = + (* Ziv misses Haskell's do notation.... 
*) + bind (parse query queryText) (fn queryParsed => + (Print.preface ("gunk> ", (MonoPrint.p_exp MonoEnv.empty queryExp)); + bind (indexOfName v) (fn i => + bind (equalitiesQuery queryParsed) (fn eqs => + bind (IM.find (!urlifiedRel0s, i)) (fn urlifiedRel0 => + SOME (ELet (v, t, cacheWrap (queryExp, i, urlifiedRel0, eqs), body), + SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, i)) + queryInfo + (tablesQuery queryParsed))))))) + in + case attempt of + SOME pair => pair + | NONE => (e', queryInfo) + end + | ESeq ((EFfiApp ("Sqlcache", "dummy", _), _), (e', _)) => (e', queryInfo) + | e' => (e', queryInfo) + in + fn file => fileMapfold (fn exp => fn state => doExp state exp) file SIMM.empty + end + +fun addFlushing (file, queryInfo) = + let + val allIndices : int list = SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] queryInfo + fun flushes indices = map (fn i => ffiAppCache' ("flush", i, [])) indices + val doExp = + fn dmlExp as EDml (dmlText, _) => + let + val indices = + case parse dml dmlText of + SOME dmlParsed => SIMM.findList (queryInfo, tableDml dmlParsed) + | NONE => allIndices + in + sequence (flushes indices @ [dmlExp]) + end + | e' => e' + in + fileMap doExp file + end + +fun go file = + let + val () = Sql.sqlcacheMode := true + in + addFlushing (addChecking file) before Sql.sqlcacheMode := false + end + + +(* BEGIN OLD fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) fun intTyp loc = (TFfi ("Basis", "int"), loc) +fun stringExp (s, loc) = (EPrim (Prim.String (Prim.Normal, s)), loc) + fun boolPat (b, loc) = (PCon (Enum, PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, con = if b then "True" else "False"}, @@ -23,11 +249,13 @@ fun boolPat (b, loc) = (PCon (Enum, loc) fun boolTyp loc = (TFfi ("Basis", "int"), loc) -fun ffiAppExp (module, func, index, loc) = - (EFfiApp (module, func ^ Int.toString index, []), loc) +fun ffiAppExp (module, func, index, args, loc) = + (EFfiApp (module, func ^ Int.toString index, args), loc) -fun sequence ((exp :: exps), loc) = +val sequence = + fn ((exp :: exps), loc) => List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) exp exps + | _ => raise Match fun antiguardUnit (cond, exp, loc) = (ECase (cond, @@ -41,11 +269,10 @@ fun underAbs f (exp as (exp', loc)) = EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) | _ => f exp -(* Program analysis and augmentation. 
*) val rec tablesRead = - fn Query1 {From=tablePairs, ...} => SS.fromList (map #1 tablePairs) - | Union (q1,q2) => SS.union (tablesRead q1, tablesRead q2) + fn Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) + | Union (q1, q2) => SS.union (tablesRead q1, tablesRead q2) val tableWritten = fn Insert (tab, _) => tab @@ -57,7 +284,7 @@ fun tablesInExp' exp' = val nothing = {read = SS.empty, written = SS.empty} in case exp' of - EQuery {query=e, ...} => + EQuery {query = e, ...} => (case parse query e of SOME q => {read = tablesRead q, written = SS.empty} | NONE => nothing) @@ -71,8 +298,11 @@ fun tablesInExp' exp' = val tablesInExp = let fun addTables (exp', {read, written}) = - let val {read = r, written = w} = tablesInExp' exp' - in {read = SS.union (r, read), written = SS.union (w, written)} end + let + val {read = r, written = w} = tablesInExp' exp' + in + {read = SS.union (r, read), written = SS.union (w, written)} + end in MonoUtil.Exp.fold {typ = #2, exp = addTables} {read = SS.empty, written = SS.empty} @@ -150,7 +380,7 @@ fun fileFoldMapiSelected f init (file, indices) = in case MonoUtil.File.mapfold {typ = nada, exp = nada, decl = doDecl} file init of Search.Continue x => x - | _ => (file, init) (* Should never happen. *) + | _ => raise Match (* Should never happen. *) end fun fileMapSelected f = #1 o fileFoldMapiSelected (fn (_, x, _) => (f x, ())) () @@ -178,4 +408,6 @@ fun go file = addCacheFlushing (fileWithChecks, tablesToIndices, writers) end +END OLD *) + end -- cgit v1.2.3 From 0185025d29459fe681afa1c01faa22a5d8034884 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 14 Oct 2014 18:07:09 -0400 Subject: Add mono_inline.sml (which was left out of last commit). --- src/mono_inline.sml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 src/mono_inline.sml (limited to 'src') diff --git a/src/mono_inline.sml b/src/mono_inline.sml new file mode 100644 index 00000000..d23419f3 --- /dev/null +++ b/src/mono_inline.sml @@ -0,0 +1,28 @@ +structure MonoInline = struct + +fun inlineFull file = + let + val oldInline = Settings.getMonoInline () + val oldFull = !MonoReduce.fullMode + in + (Settings.setMonoInline (case Int.maxInt of + NONE => 1000000 + | SOME n => n); + MonoReduce.fullMode := true; + let + val file = MonoReduce.reduce file + val file = MonoOpt.optimize file + val file = Fuse.fuse file + val file = MonoOpt.optimize file + val file = MonoShake.shake file + in + file + end before + (MonoReduce.fullMode := oldFull; + Settings.setMonoInline oldInline)) + handle ex => (Settings.setMonoInline oldInline; + MonoReduce.fullMode := oldFull; + raise ex) + end + +end -- cgit v1.2.3 From 7b94f3433f47e4e5010dc2af6010181da49637e8 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Fri, 31 Oct 2014 09:25:03 -0400 Subject: Mostly finish effectfulness analysis. 
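The point of the analysis is to cache only queries whose text and body are free of side effects, with EWrite excepted because writes are exactly what the cache records. One way to think of the iteration is as a fixed-point computation; a minimal sketch with illustrative names follows (the real code instead folds over Mono declarations with MonoUtil and repeats the pass a bounded number of times, Bellman-Ford style, rather than testing for convergence):

    structure IS = IntBinarySet

    (* Apply one monotone pass over all declarations until the set of
       names known to be effectful stops growing. *)
    fun fixedPoint (pass : IS.set -> IS.set) (effs : IS.set) : IS.set =
        let
            val effs' = pass effs
        in
            if IS.equal (effs', effs) then effs else fixedPoint pass effs'
        end

Because each pass can only add names and there are finitely many declarations, repeating the pass once per declaration reaches the same result as iterating to convergence.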
--- caching-tests/test.db | Bin 5120 -> 5120 bytes caching-tests/test.ur | 7 +- src/cjr_print.sml | 29 +++++-- src/main.mlton.sml | 3 +- src/sources | 16 ++-- src/sql.sig | 6 ++ src/sql.sml | 8 +- src/sqlcache.sml | 225 ++++++++++++++++++++++++++++++++++++++++++++------ 8 files changed, 242 insertions(+), 52 deletions(-) (limited to 'src') diff --git a/caching-tests/test.db b/caching-tests/test.db index 944aa851..66b6ad88 100644 Binary files a/caching-tests/test.db and b/caching-tests/test.db differ diff --git a/caching-tests/test.ur b/caching-tests/test.ur index cb391da7..06ed456c 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -12,12 +12,11 @@ fun cache01 () = fun cache10 () = - res <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); + res <- queryX (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42) + (fn row => {[row.Foo10.Bar]}); return Reading 2. - {case res of - None => ? - | Some row => {[row.Foo10.Bar]}} + {res} fun cache11 () = diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 6427cf3d..c150631c 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3394,6 +3394,7 @@ fun p_file env (ds, ps) = newline, (* For sqlcache. *) + (* TODO: also record between Cache.check and Cache.store. *) box (List.map (fn {index, params} => let val i = Int.toString index @@ -3412,7 +3413,11 @@ fun p_file env (ds, ps) = val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") "\n" val eqs = paramRepeat (fn p => "strcmp(param" ^ i ^ "_" ^ p ^ ", p" ^ p ^ ")") " || " - in box [string "static char *cache", + in box [string "static char *cacheQuery", + string i, + string " = NULL;", + newline, + string "static char *cacheWrite", string i, string " = NULL;", newline, @@ -3424,12 +3429,14 @@ fun p_file env (ds, ps) = string args, string ") {\n puts(\"SQLCACHE: checked ", string i, - string ".\");\n if (cache", + string ".\");\n if (cacheQuery", string i, (* ASK: is returning the pointer okay? Should we duplicate? 
*) string " == NULL || ", string eqs, - string ") {\n puts(\"miss D:\"); puts(p0);\n return NULL;\n } else {\n puts(\"hit :D\");\n return cache", + string ") {\n puts(\"miss D:\");\n uw_recordingStart(ctx);\n return NULL;\n } else {\n puts(\"hit :D\");\n uw_write(ctx, cacheWrite", + string i, + string ");\n return cacheQuery", string i, string ";\n } };", newline, @@ -3437,27 +3444,31 @@ fun p_file env (ds, ps) = string i, string "(uw_context ctx, uw_Basis_string s, ", string args, - string ") {\n free(cache", + string ") {\n free(cacheQuery", + string i, + string "); free(cacheWrite", string i, string ");", newline, string frees, newline, - string "cache", + string "cacheQuery", + string i, + string " = strdup(s); cacheWrite", string i, - string " = strdup(s);", + string " = uw_recordingRead(ctx);", newline, string sets, newline, string "puts(\"SQLCACHE: stored ", string i, - string ".\"); puts(p0);\n return uw_unit_v;\n };", + string ".\");\n return uw_unit_v;\n };", newline, string "static uw_unit uw_Sqlcache_flush", string i, - string "(uw_context ctx) {\n free(cache", + string "(uw_context ctx) {\n free(cacheQuery", string i, - string ");\n cache", + string ");\n cacheQuery", string i, string " = NULL;\n puts(\"SQLCACHE: flushed ", string i, diff --git a/src/main.mlton.sml b/src/main.mlton.sml index 5ecd7290..3ae968b0 100644 --- a/src/main.mlton.sml +++ b/src/main.mlton.sml @@ -47,7 +47,6 @@ fun oneRun args = Elaborate.unifyMore := false; Compiler.dumpSource := false; Compiler.doIflow := false; - Compiler.doSqlcache := false; Demo.noEmacs := false; Settings.setDebug false) @@ -161,7 +160,7 @@ fun oneRun args = (Compiler.doIflow := true; doArgs rest) | "-sqlcache" :: rest => - (Compiler.doSqlcache := true; + (Settings.setSqlcache true; doArgs rest) | "-moduleOf" :: fname :: _ => (print (Compiler.moduleOf fname ^ "\n"); diff --git a/src/sources b/src/sources index 518b7484..7ad60517 100644 --- a/src/sources +++ b/src/sources @@ -168,6 +168,14 @@ $(SRC)/mono_env.sml $(SRC)/mono_print.sig $(SRC)/mono_print.sml +$(SRC)/sql.sig +$(SRC)/sql.sml + +$(SRC)/multimap_fn.sml + +$(SRC)/sqlcache.sig +$(SRC)/sqlcache.sml + $(SRC)/monoize.sig $(SRC)/monoize.sml @@ -186,9 +194,6 @@ $(SRC)/mono_shake.sml $(SRC)/fuse.sig $(SRC)/fuse.sml -$(SRC)/sql.sig -$(SRC)/sql.sml - $(SRC)/iflow.sig $(SRC)/iflow.sml @@ -207,11 +212,6 @@ $(SRC)/sidecheck.sml $(SRC)/sigcheck.sig $(SRC)/sigcheck.sml -$(SRC)/multimap_fn.sml - -$(SRC)/sqlcache.sig -$(SRC)/sqlcache.sml - $(SRC)/mono_inline.sml $(SRC)/cjr.sml diff --git a/src/sql.sig b/src/sql.sig index 2aba8383..cf2ae14a 100644 --- a/src/sql.sig +++ b/src/sql.sig @@ -4,6 +4,12 @@ val debug : bool ref val sqlcacheMode : bool ref +datatype chunk = + String of string + | Exp of Mono.exp + +val chunkify : Mono.exp -> chunk list + type lvar = int datatype func = diff --git a/src/sql.sml b/src/sql.sml index d38de055..7cfed022 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -272,10 +272,12 @@ fun sqlify chs = fun sqlifySqlcache chs = case chs of - (* Match entire FFI application, not just its argument. *) - Exp (e' as EFfiApp ("Basis", f, [(_, _)]), _) :: chs => + (* Could have variables as well as FFIs. *) + Exp (e as (ERel _, _)) :: chs => SOME (e, chs) + (* If it is an FFI, match the entire expression. 
*) + | Exp (e as (EFfiApp ("Basis", f, [(_, _)]), _)) :: chs => if String.isPrefix "sqlify" f then - SOME ((e', ErrorMsg.dummySpan), chs) + SOME (e, chs) else NONE | Exp (ECase (e, [((PCon (_, PConFfi {mod = "Basis", con = "True", ...}, NONE), _), diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 563b2162..d3c21371 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -15,10 +15,127 @@ val ffiInfo : {index : int, params : int} list ref = ref [] fun getFfiInfo () = !ffiInfo -(* Program analysis. *) +(* Some FFIs have writing as their only effect, which the caching records. *) +val ffiEffectful = + let + val fs = SS.fromList ["htmlifyInt_w", + "htmlifyFloat_w", + "htmlifyString_w", + "htmlifyBool_w", + "htmlifyTime_w", + "attrifyInt_w", + "attrifyFloat_w", + "attrifyString_w", + "attrifyChar_w", + "urlifyInt_w", + "urlifyFloat_w", + "urlifyString_w", + "urlifyBool_w", + "urlifyChannel_w"] + in + fn (m, f) => Settings.isEffectful (m, f) + andalso not (m = "Basis" andalso SS.member (fs, f)) + end + + +(* Effect analysis. *) + +(* Makes an exception for EWrite (which is recorded when caching). *) +fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : Mono.exp -> bool = + (* If result is true, expression is definitely effectful. If result is + false, then expression is definitely not effectful if effs is fully + populated. The intended pattern is to use this a number of times equal + to the number of declarations in a file, Bellman-Ford style. *) + (* TODO: make incrementing of bound less janky, probably by using MonoUtil + instead of all this. *) + let + (* DEBUG: remove printing when done. *) + fun tru msg = if doPrint then (print (msg ^ "\n"); true) else true + val rec eff' = + (* ASK: is there a better way? *) + fn EPrim _ => false + (* We don't know if local functions have effects when applied. *) + | ERel idx => if inFunction andalso idx >= bound + then tru ("rel" ^ Int.toString idx) else false + | ENamed name => if IS.member (effs, name) then tru "named" else false + | ECon (_, _, NONE) => false + | ECon (_, _, SOME e) => eff e + | ENone _ => false + | ESome (_, e) => eff e + (* TODO: use FFI whitelist. *) + | EFfi (m, f) => if ffiEffectful (m, f) then tru "ffi" else false + | EFfiApp (m, f, _) => if ffiEffectful (m, f) then tru "ffiapp" else false + (* ASK: we're calling functions effectful if they have effects when + applied or if the function expressions themselves have effects. + Is that okay? *) + (* This is okay because the values we ultimately care about aren't + functions, and this is a conservative approximation, anyway. *) + | EApp (eFun, eArg) => effectful doPrint effs true bound eFun orelse eff eArg + | EAbs (_, _, _, e) => effectful doPrint effs inFunction (bound+1) e + | EUnop (_, e) => eff e + | EBinop (_, _, e1, e2) => eff e1 orelse eff e2 + | ERecord xs => List.exists (fn (_, e, _) => eff e) xs + | EField (e, _) => eff e + (* If any case could be effectful, consider it effectful. *) + | ECase (e, xs, _) => eff e orelse List.exists (fn (_, e) => eff e) xs + | EStrcat (e1, e2) => eff e1 orelse eff e2 + (* ASK: how should we treat these three? *) + | EError _ => tru "error" + | EReturnBlob _ => tru "blob" + | ERedirect _ => tru "redirect" + (* EWrite is a special exception because we record writes when caching. *) + | EWrite _ => false + | ESeq (e1, e2) => eff e1 orelse eff e2 + (* TODO: keep context of which local variables aren't effectful? Only + makes a difference for function expressions, though. 
*) + | ELet (_, _, eBind, eBody) => eff eBind orelse + effectful doPrint effs inFunction (bound+1) eBody + | EClosure (_, es) => List.exists eff es + (* TODO: deal with EQuery. *) + | EQuery _ => tru "query" + | EDml _ => tru "dml" + | ENextval _ => tru "nextval" + | ESetval _ => tru "setval" + | EUnurlify (e, _, _) => eff e + (* ASK: how should we treat this? *) + | EJavaScript _ => tru "javascript" + (* ASK: these are all effectful, right? *) + | ESignalReturn _ => tru "signalreturn" + | ESignalBind _ => tru "signalbind" + | ESignalSource _ => tru "signalsource" + | EServerCall _ => tru "servercall" + | ERecv _ => tru "recv" + | ESleep _ => tru "sleep" + | ESpawn _ => tru "spawn" + and eff = fn (e', _) => eff' e' + in + eff + end + +(* TODO: test this. *) +val effectfulMap = + let + fun doVal ((_, name, _, e, _), effMap) = + if effectful false effMap false 0 e + then IS.add (effMap, name) + else effMap + val doDecl = + fn (DVal v, effMap) => doVal (v, effMap) + (* Repeat the list of declarations a number of times equal to its size. *) + | (DValRec vs, effMap) => + List.foldl doVal effMap (List.concat (List.map (fn _ => vs) vs)) + (* ASK: any other cases? *) + | (_, effMap) => effMap + in + MonoUtil.File.fold {typ = #2, exp = #2, decl = doDecl} IS.empty + end + + +(* SQL analysis. *) val useInjIfPossible = - fn SqConst prim => Inj (EPrim (Prim.String (Prim.Normal, Prim.toString prim)), ErrorMsg.dummySpan) + fn SqConst prim => Inj (EPrim (Prim.String (Prim.Normal, Prim.toString prim)), + ErrorMsg.dummySpan) | sqexp => sqexp fun equalities (canonicalTable : string -> string) : @@ -89,6 +206,7 @@ val tableDml = (* Program instrumentation. *) +fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), ErrorMsg.dummySpan) val stringTyp = (TFfi ("Basis", "string"), ErrorMsg.dummySpan) val sequence = @@ -103,7 +221,7 @@ val sequence = fun ffiAppCache' (func, index, args) : Mono.exp' = EFfiApp ("Sqlcache", func ^ Int.toString index, args) -fun ffiAppCache (func, index, args) : Mono. exp = +fun ffiAppCache (func, index, args) : Mono.exp = (ffiAppCache' (func, index, args), ErrorMsg.dummySpan) val varPrefix = "queryResult" @@ -113,7 +231,17 @@ fun indexOfName varName = then Int.fromString (String.extract (varName, String.size varPrefix, NONE)) else NONE -val incRels = MonoUtil.Exp.map {typ = fn x => x, exp = fn ERel n => ERel (n + 1) | x => x} +(* Always increments negative indices because that's what we need later. *) +fun incRelsBound bound inc = + MonoUtil.Exp.mapB + {typ = fn x => x, + exp = fn level => + (fn ERel n => ERel (if n >= level orelse n < 0 then n + inc else n) + | x => x), + bind = fn (level, MonoUtil.Exp.RelE _) => level + 1 | (level, _) => level} + bound + +val incRels = incRelsBound 0 (* Filled in by instrumentQuery during Monoize, used during Sqlcache. *) val urlifiedRel0s : Mono.exp IM.map ref = ref IM.empty @@ -129,12 +257,11 @@ val instrumentQuery = val i = !nextQuery before nextQuery := !nextQuery + 1 in urlifiedRel0s := IM.insert (!urlifiedRel0s, i, urlifiedRel0); - (* ASK: name variables properly? *) (ELet (varPrefix ^ Int.toString i, typ, query, (* Uses a dummy FFI call to keep the urlified expression around, which in turn keeps the declarations required for urlification safe from MonoShake. The dummy call is removed during Sqlcache. *) - (* ASK: is there a better way? *) + (* TODO: thread a Monoize.Fm.t through this module. 
*) (ESeq ((EFfiApp ("Sqlcache", "dummy", [(urlifiedRel0, stringTyp)]), loc), (ERel 0, loc)), loc)), @@ -145,28 +272,26 @@ val instrumentQuery = iq end -val gunk : ((string * string) * Mono.exp) list list ref = ref [[]] - fun cacheWrap (query, i, urlifiedRel0, eqs) = case query of (EQuery {state = typ, ...}, _) => let + val () = ffiInfo := {index = i, params = length eqs} :: !ffiInfo val loc = ErrorMsg.dummySpan - (* TODO: deal with effectful injected expressions. *) - val args = (ffiInfo := {index = i, params = length eqs} :: !ffiInfo; - map (fn (_, e) => (e, stringTyp)) eqs) before gunk := eqs :: !gunk - val argsInc = map (fn (e, t) => (incRels e, t)) args + (* We ensure before this step that all arguments aren't effectful. + by turning them into local variables as needed. *) + val args = map (fn (_, e) => (e, stringTyp)) eqs + val argsInc = map (fn (e, typ) => (incRels 1 e, typ)) args + val check = ffiAppCache ("check", i, args) + val store = ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argsInc) + val rel0 = (ERel 0, loc) in - (ECase (ffiAppCache ("check", i, args), + (ECase (check, [((PNone stringTyp, loc), - (ELet ("q", typ, query, - (ESeq (ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argsInc), - (ERel 0, loc)), - loc)), - loc)), + (ELet ("q", typ, query, (ESeq (store, rel0), loc)), loc)), ((PSome (stringTyp, (PVar ("hit", stringTyp), loc)), loc), - (* ASK: what does this bool do? *) - (EUnurlify ((ERel 0, loc), typ, false), loc))], + (* Boolean is false because we're not unurlifying from a cookie. *) + (EUnurlify (rel0, typ, false), loc))], {disc = stringTyp, result = typ}), loc) end @@ -181,20 +306,66 @@ fun fileMapfold doExp file start = fun fileMap doExp file = #1 (fileMapfold (fn x => fn _ => (doExp x, ())) file ()) -val addChecking = +fun addChecking file = let fun doExp queryInfo = - fn e' as ELet (v, t, queryExp as (EQuery {query = queryText, ...}, _), body) => + fn e' as ELet (v, t, + queryExp' as (EQuery {query = origQueryText, + initial, body, state, tables, exps}, queryLoc), + letBody) => let + val loc = ErrorMsg.dummySpan + val chunks = chunkify origQueryText + fun strcat (e1, e2) = (EStrcat (e1, e2), loc) + val (newQueryText, newVariables) = + (* Important that this is foldr (to oppose foldl below). *) + List.foldr + (fn (chunk, (qText, newVars)) => + case chunk of + Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) + | Exp (e as (ERel _, _)) => (strcat (e, qText), newVars) + | Exp (e as (ENamed _, _)) => (strcat (e, qText), newVars) + (* Head of newVars has lowest index. *) + | Exp e => + let + val n = length newVars + in + (* This is the (n + 1)th new variable, so + there are already n new variables bound, + so we increment indices by n. *) + (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) + end + | String s => (strcat (stringExp s, qText), newVars)) + (stringExp "", []) + chunks + fun wrapLets e' = + (* Important that this is foldl (to oppose foldr above). *) + List.foldl (fn (v, e') => ELet ("sqlArgz", stringTyp, v, (e', loc))) e' newVariables + (* Increment once for each new variable just made. 
*) + val queryExp = incRels (length newVariables) + (EQuery {query = newQueryText, + initial = initial, + body = body, + state = state, + tables = tables, + exps = exps}, + queryLoc) + val (EQuery {query = queryText, ...}, _) = queryExp + (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)); *) fun bind x f = Option.mapPartial f x + fun guard b x = if b then x else NONE + (* DEBUG: set first boolean argument to true to turn on printing. *) + fun safe bound = not o effectful true (effectfulMap file) false bound val attempt = (* Ziv misses Haskell's do notation.... *) + guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( bind (parse query queryText) (fn queryParsed => - (Print.preface ("gunk> ", (MonoPrint.p_exp MonoEnv.empty queryExp)); bind (indexOfName v) (fn i => bind (equalitiesQuery queryParsed) (fn eqs => bind (IM.find (!urlifiedRel0s, i)) (fn urlifiedRel0 => - SOME (ELet (v, t, cacheWrap (queryExp, i, urlifiedRel0, eqs), body), + SOME (wrapLets (ELet (v, t, + cacheWrap (queryExp, i, urlifiedRel0, eqs), + incRelsBound 1 (length newVariables) letBody)), SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, i)) queryInfo (tablesQuery queryParsed))))))) @@ -206,7 +377,7 @@ val addChecking = | ESeq ((EFfiApp ("Sqlcache", "dummy", _), _), (e', _)) => (e', queryInfo) | e' => (e', queryInfo) in - fn file => fileMapfold (fn exp => fn state => doExp state exp) file SIMM.empty + fileMapfold (fn exp => fn state => doExp state exp) file SIMM.empty end fun addFlushing (file, queryInfo) = @@ -231,8 +402,10 @@ fun addFlushing (file, queryInfo) = fun go file = let val () = Sql.sqlcacheMode := true + val file' = addFlushing (addChecking file) + val () = Sql.sqlcacheMode := false in - addFlushing (addChecking file) before Sql.sqlcacheMode := false + file' end -- cgit v1.2.3 From dc5e7102563b9c0714391f86b6dcf852445ee192 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 10 Nov 2014 22:04:40 -0500 Subject: Progress towards invalidation based on equalities of fields. 
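The new invalidation check normalizes WHERE clauses into a list of lists of atoms (a disjunction of conjunctions or vice versa) and then compares the query's and the DML statement's conditions atom by atom (conflictMaps below, built on the new union-find functor). The heart of the normalization is the cartesian product over alternative sub-formulas, the same shape as the version added to sqlcache.sml below, shown standalone here on plain lists:

    (* All ways of choosing one element from each list, e.g.
       cartesianProduct [[1, 2], [3]] = [[1, 3], [2, 3]].  Distributing an
       AND of ORs into an OR of ANDs (or dually) is exactly this product
       of the sub-formulas' normal forms. *)
    fun cartesianProduct [] = [[]]
      | cartesianProduct (xs :: xss) =
        List.concat (List.map (fn ys => List.map (fn x => x :: ys) xs)
                              (cartesianProduct xss))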
--- caching-tests/test.db | Bin 5120 -> 5120 bytes src/cjr_print.sml | 23 ++- src/iflow.sml | 116 ++++++------ src/sources | 2 + src/sql.sig | 26 +-- src/sql.sml | 32 ++-- src/sqlcache.sml | 474 ++++++++++++++++++++++---------------------------- src/union_find_fn.sml | 47 +++++ 8 files changed, 368 insertions(+), 352 deletions(-) create mode 100644 src/union_find_fn.sml (limited to 'src') diff --git a/caching-tests/test.db b/caching-tests/test.db index 66b6ad88..a4661341 100644 Binary files a/caching-tests/test.db and b/caching-tests/test.db differ diff --git a/src/cjr_print.sml b/src/cjr_print.sml index c150631c..56310b81 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3400,19 +3400,24 @@ fun p_file env (ds, ps) = let val i = Int.toString index fun paramRepeat itemi sep = let - val rec f = - fn 0 => itemi (Int.toString 0) - | n => f (n-1) ^ itemi (Int.toString n) + fun f n = + if n < 0 then "" + else if n = 0 then itemi (Int.toString 0) + else f (n-1) ^ sep ^ itemi (Int.toString n) in f (params - 1) end - val args = paramRepeat (fn p => "uw_Basis_string p" ^ p) ", " + fun paramRepeatInit itemi sep = + if params = 0 then "" else sep ^ paramRepeat itemi sep + val args = paramRepeatInit (fn p => "uw_Basis_string p" ^ p) ", " val decls = paramRepeat (fn p => "uw_Basis_string param" ^ i ^ "_" ^ p ^ " = NULL;") "\n" val sets = paramRepeat (fn p => "param" ^ i ^ "_" ^ p ^ " = strdup(p" ^ p ^ ");") "\n" val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") "\n" - val eqs = paramRepeat (fn p => "strcmp(param" ^ i ^ "_" ^ p - ^ ", p" ^ p ^ ")") " || " + (* Starting || makes logic easier when there are no parameters. *) + val eqs = paramRepeatInit (fn p => "strcmp(param" ^ i ^ "_" ^ p + ^ ", p" ^ p ^ ")") + " || " in box [string "static char *cacheQuery", string i, string " = NULL;", @@ -3425,14 +3430,14 @@ fun p_file env (ds, ps) = newline, string "static uw_Basis_string uw_Sqlcache_check", string i, - string "(uw_context ctx, ", + string "(uw_context ctx", string args, string ") {\n puts(\"SQLCACHE: checked ", string i, string ".\");\n if (cacheQuery", string i, (* ASK: is returning the pointer okay? Should we duplicate? *) - string " == NULL || ", + string " == NULL", string eqs, string ") {\n puts(\"miss D:\");\n uw_recordingStart(ctx);\n return NULL;\n } else {\n puts(\"hit :D\");\n uw_write(ctx, cacheWrite", string i, @@ -3442,7 +3447,7 @@ fun p_file env (ds, ps) = newline, string "static uw_unit uw_Sqlcache_store", string i, - string "(uw_context ctx, uw_Basis_string s, ", + string "(uw_context ctx, uw_Basis_string s", string args, string ") {\n free(cacheQuery", string i, diff --git a/src/iflow.sml b/src/iflow.sml index 40cf8993..f68d8f72 100644 --- a/src/iflow.sml +++ b/src/iflow.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -115,36 +115,36 @@ fun p_reln r es = | PCon1 s => box [string (s ^ "("), p_list p_exp es, string ")"] - | Eq => p_bop "=" es - | Ne => p_bop "<>" es - | Lt => p_bop "<" es - | Le => p_bop "<=" es - | Gt => p_bop ">" es - | Ge => p_bop ">=" es + | Cmp Eq => p_bop "=" es + | Cmp Ne => p_bop "<>" es + | Cmp Lt => p_bop "<" es + | Cmp Le => p_bop "<=" es + | Cmp Gt => p_bop ">" es + | Cmp Ge => p_bop ">=" es fun p_prop p = case p of True => string "True" | False => string "False" | Unknown => string "??" - | And (p1, p2) => box [string "(", - p_prop p1, - string ")", - space, - string "&&", - space, - string "(", - p_prop p2, - string ")"] - | Or (p1, p2) => box [string "(", - p_prop p1, - string ")", - space, - string "||", - space, - string "(", - p_prop p2, - string ")"] + | Lop (And, p1, p2) => box [string "(", + p_prop p1, + string ")", + space, + string "&&", + space, + string "(", + p_prop p2, + string ")"] + | Lop (Or, p1, p2) => box [string "(", + p_prop p1, + string ")", + space, + string "||", + space, + string "(", + p_prop p2, + string ")"] | Reln (r, es) => p_reln r es | Cond (e, p) => box [string "(", p_exp e, @@ -518,7 +518,7 @@ fun representative (db : database, e) = Variety = Nothing, Known = ref (!(#Known (unNode r))), Ge = ref NONE}) - + val r'' = ref (Node {Id = nodeId (), Rep = ref NONE, Cons = #Cons (unNode r), @@ -529,7 +529,7 @@ fun representative (db : database, e) = #Rep (unNode r) := SOME r''; r' end - | _ => raise Contradiction + | _ => raise Contradiction end in rep e @@ -687,9 +687,9 @@ fun assert (db, a) = end | _ => raise Contradiction end - | (Eq, [e1, e2]) => + | (Cmp Eq, [e1, e2]) => markEq (representative (db, e1), representative (db, e2)) - | (Ge, [e1, e2]) => + | (Cmp Ge, [e1, e2]) => let val r1 = representative (db, e1) val r2 = representative (db, e2) @@ -734,14 +734,14 @@ fun check (db, a) = (case #Variety (unNode (representative (db, e))) of Dt1 (f', _) => f' = f | _ => false) - | (Eq, [e1, e2]) => + | (Cmp Eq, [e1, e2]) => let val r1 = representative (db, e1) val r2 = representative (db, e2) in repOf r1 = repOf r2 end - | (Ge, [e1, e2]) => + | (Cmp Ge, [e1, e2]) => let val r1 = representative (db, e1) val r2 = representative (db, e2) @@ -848,7 +848,7 @@ fun setHyps (n', hs) = (hyps := (n', hs, ref false); Cc.clear db; app (fn a => Cc.assert (db, a)) hs) - end + end fun useKeys () = let @@ -872,7 +872,7 @@ fun useKeys () = let val r = Cc.check (db, - AReln (Eq, [Proj (r1, f), + AReln (Cmp Eq, [Proj (r1, f), Proj (r2, f)])) in (*Print.prefaces "Fs" @@ -888,7 +888,7 @@ fun useKeys () = r end)) ks then (changed := true; - Cc.assert (db, AReln (Eq, [r1, r2])); + Cc.assert (db, AReln (Cmp Eq, [r1, r2])); finder (hyps, acc)) else finder (hyps, a :: acc) @@ -1115,7 +1115,7 @@ fun havocCookie cname = val (_, hs, _) = !hyps in hnames := n + 1; - hyps := (n, List.filter (fn AReln (Eq, [_, Func (Other f, [])]) => f <> cname | _ => true) hs, ref false) + hyps := (n, List.filter (fn AReln (Cmp Eq, [_, Func (Other f, [])]) => f <> cname | _ => true) hs, ref false) end fun check a = Cc.check (db, a) @@ -1138,7 +1138,7 @@ fun removeDups (ls : (string 
* string) list) = val ls = removeDups ls in if List.exists (fn x' => x' = x) ls then - ls + ls else x :: ls end @@ -1171,7 +1171,7 @@ fun expIn rv env rvOf = | Null => inl (Func (DtCon0 "None", [])) | SqNot e => inr (case expIn e of - inl e => Reln (Eq, [e, Func (DtCon0 "Basis.bool.False", [])]) + inl e => Reln (Cmp Eq, [e, Func (DtCon0 "Basis.bool.False", [])]) | inr _ => Unknown) | Field (v, f) => inl (Proj (rvOf v, f)) | Computed _ => default () @@ -1181,15 +1181,15 @@ fun expIn rv env rvOf = val e2 = expIn e2 in inr (case (bo, e1, e2) of - (Exps f, inl e1, inl e2) => f (e1, e2) - | (Props f, v1, v2) => + (RCmp c, inl e1, inl e2) => Reln (Cmp c, [e1, e2]) + | (RLop l, v1, v2) => let fun pin v = case v of - inl e => Reln (Eq, [e, Func (DtCon0 "Basis.bool.True", [])]) + inl e => Reln (Cmp Eq, [e, Func (DtCon0 "Basis.bool.True", [])]) | inr p => p in - f (pin v1, pin v2) + Lop (l, pin v1, pin v2) end | _ => Unknown) end @@ -1205,7 +1205,7 @@ fun expIn rv env rvOf = (case expIn e of inl e => inl (Func (Other f, [e])) | _ => default ()) - + | Unmodeled => inl (Func (Other "allow", [rv ()])) end in @@ -1219,8 +1219,8 @@ fun decomp {Save = save, Restore = restore, Add = add} = True => (k () handle Cc.Contradiction => ()) | False => () | Unknown => () - | And (p1, p2) => go p1 (fn () => go p2 k) - | Or (p1, p2) => + | Lop (And, p1, p2) => go p1 (fn () => go p2 k) + | Lop (Or, p1, p2) => let val saved = save () in @@ -1351,7 +1351,7 @@ fun doQuery (arg : 'a doQuery) (e as (_, loc)) = | SOME e => let val p = case expIn e of - inl e => Reln (Eq, [e, Func (DtCon0 "Basis.bool.True", [])]) + inl e => Reln (Cmp Eq, [e, Func (DtCon0 "Basis.bool.True", [])]) | inr p => p val saved = #Save arg () @@ -1365,9 +1365,9 @@ fun doQuery (arg : 'a doQuery) (e as (_, loc)) = fun normal () = doWhere normal' in (case #Select r of - [SqExp (Binop (Exps bo, Count, SqConst (Prim.Int 0)), f)] => - (case bo (Const (Prim.Int 1), Const (Prim.Int 2)) of - Reln (Gt, [Const (Prim.Int 1), Const (Prim.Int 2)]) => + [SqExp (Binop (RCmp bo, Count, SqConst (Prim.Int 0)), f)] => + (case bo of + Gt => (case #Cont arg of SomeCol _ => () | AllCols k => @@ -1469,7 +1469,7 @@ fun evalExp env (e as (_, loc)) k = evalExp env e (fn e => doArgs (es, e :: acc)) in doArgs (es, []) - end + end in case #1 e of EPrim p => k (Const p) @@ -1519,7 +1519,7 @@ fun evalExp env (e as (_, loc)) k = ([], []) => (evalExp env' (#body rf) (fn _ => ()); St.reinstate saved; default ()) - + | (arg :: args, mode :: modes) => evalExp env arg (fn arg => let @@ -1663,7 +1663,7 @@ fun evalExp env (e as (_, loc)) k = Save = St.stash, Restore = St.reinstate, Cont = AllCols (fn x => - (St.assert [AReln (Eq, [r, x])]; + (St.assert [AReln (Cmp Eq, [r, x])]; evalExp (acc :: r :: env) b k))} q end) | EDml (e, _) => @@ -1697,15 +1697,15 @@ fun evalExp env (e as (_, loc)) k = | Delete (tab, e) => let val old = St.nextVar () - + val expIn = expIn (Var o St.nextVar) env (fn "T" => Var old | _ => raise Fail "Iflow.evalExp: Bad field expression in DELETE") val p = case expIn e of - inl e => raise Fail "Iflow.evalExp: DELETE with non-boolean" + inl e => raise Fail "Iflow.evalExp: DELETE with non-boolean" | inr p => p - + val saved = St.stash () in St.assert [AReln (Sql (tab ^ "$Old"), [Var old]), @@ -1748,7 +1748,7 @@ fun evalExp env (e as (_, loc)) k = (f, Proj (Var old, f)) :: fs) fs fs' val p = case expIn e of - inl e => raise Fail "Iflow.evalExp: UPDATE with non-boolean" + inl e => raise Fail "Iflow.evalExp: UPDATE with non-boolean" | inr p => p val saved = St.stash () in 
@@ -1764,7 +1764,7 @@ fun evalExp env (e as (_, loc)) k = k (Recd [])) handle Cc.Contradiction => ()) end) - + | ENextval (EPrim (Prim.String (_, seq)), _) => let val nv = St.nextVar () @@ -1780,7 +1780,7 @@ fun evalExp env (e as (_, loc)) k = val e = Var (St.nextVar ()) val e' = Func (Other ("cookie/" ^ cname), []) in - St.assert [AReln (Known, [e]), AReln (Eq, [e, e'])]; + St.assert [AReln (Known, [e]), AReln (Cmp Eq, [e, e'])]; k e end @@ -2159,7 +2159,7 @@ fun check (file : file) = end | _ => ()) end - + | _ => () in app decl (#1 file) diff --git a/src/sources b/src/sources index 7ad60517..33c01f94 100644 --- a/src/sources +++ b/src/sources @@ -171,6 +171,8 @@ $(SRC)/mono_print.sml $(SRC)/sql.sig $(SRC)/sql.sml +$(SRC)/union_find_fn.sml + $(SRC)/multimap_fn.sml $(SRC)/sqlcache.sig diff --git a/src/sql.sig b/src/sql.sig index cf2ae14a..5f5d1b23 100644 --- a/src/sql.sig +++ b/src/sql.sig @@ -26,24 +26,30 @@ datatype exp = | Recd of (string * exp) list | Proj of exp * string -datatype reln = - Known - | Sql of string - | PCon0 of string - | PCon1 of string - | Eq +datatype cmp = + Eq | Ne | Lt | Le | Gt | Ge +datatype reln = + Known + | Sql of string + | PCon0 of string + | PCon1 of string + | Cmp of cmp + +datatype lop = + And + | Or + datatype prop = True | False | Unknown - | And of prop * prop - | Or of prop * prop + | Lop of lop * prop * prop | Reln of reln * exp list | Cond of exp * prop @@ -52,8 +58,8 @@ type 'a parser val parse : 'a parser -> Mono.exp -> 'a option datatype Rel = - Exps of exp * exp -> prop - | Props of prop * prop -> prop + RCmp of cmp + | RLop of lop datatype sqexp = SqConst of Prim.t diff --git a/src/sql.sml b/src/sql.sml index 7cfed022..59b4eac6 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -20,24 +20,30 @@ datatype exp = | Recd of (string * exp) list | Proj of exp * string -datatype reln = - Known - | Sql of string - | PCon0 of string - | PCon1 of string - | Eq +datatype cmp = + Eq | Ne | Lt | Le | Gt | Ge +datatype reln = + Known + | Sql of string + | PCon0 of string + | PCon1 of string + | Cmp of cmp + +datatype lop = + And + | Or + datatype prop = True | False | Unknown - | And of prop * prop - | Or of prop * prop + | Lop of lop * prop * prop | Reln of reln * exp list | Cond of exp * prop @@ -183,8 +189,8 @@ val field = wrap (follow (opt (follow t_ident (const "."))) | (NONE, f) => ("T", f)) (* Should probably deal with this MySQL/SQLite case better some day. *) datatype Rel = - Exps of exp * exp -> prop - | Props of prop * prop -> prop + RCmp of cmp + | RLop of lop datatype sqexp = SqConst of Prim.t @@ -200,7 +206,7 @@ datatype sqexp = | Unmodeled | Null -fun cmp s r = wrap (const s) (fn () => Exps (fn (e1, e2) => Reln (r, [e1, e2]))) +fun cmp s r = wrap (const s) (fn () => RCmp r) val sqbrel = altL [cmp "=" Eq, cmp "<>" Ne, @@ -208,8 +214,8 @@ val sqbrel = altL [cmp "=" Eq, cmp "<" Lt, cmp ">=" Ge, cmp ">" Gt, - wrap (const "AND") (fn () => Props And), - wrap (const "OR") (fn () => Props Or)] + wrap (const "AND") (fn () => RLop Or), + wrap (const "OR") (fn () => RLop And)] datatype ('a, 'b) sum = inl of 'a | inr of 'b diff --git a/src/sqlcache.sml b/src/sqlcache.sml index d3c21371..59800ca3 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,6 +1,5 @@ structure Sqlcache (* :> SQLCACHE *) = struct -open Sql open Mono structure IS = IntBinarySet @@ -10,13 +9,14 @@ structure SS = BinarySetFn(SK) structure SM = BinaryMapFn(SK) structure SIMM = MultimapFn(structure KeyMap = SM structure ValSet = IS) -(* Filled in by cacheWrap during Sqlcache. 
*) +(* Filled in by [cacheWrap] during [Sqlcache]. *) val ffiInfo : {index : int, params : int} list ref = ref [] fun getFfiInfo () = !ffiInfo (* Some FFIs have writing as their only effect, which the caching records. *) val ffiEffectful = + (* TODO: have this less hard-coded. *) let val fs = SS.fromList ["htmlifyInt_w", "htmlifyFloat_w", @@ -40,7 +40,7 @@ val ffiEffectful = (* Effect analysis. *) -(* Makes an exception for EWrite (which is recorded when caching). *) +(* Makes an exception for [EWrite] (which is recorded when caching). *) fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : Mono.exp -> bool = (* If result is true, expression is definitely effectful. If result is false, then expression is definitely not effectful if effs is fully @@ -62,7 +62,6 @@ fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : Mono.e | ECon (_, _, SOME e) => eff e | ENone _ => false | ESome (_, e) => eff e - (* TODO: use FFI whitelist. *) | EFfi (m, f) => if ffiEffectful (m, f) then tru "ffi" else false | EFfiApp (m, f, _) => if ffiEffectful (m, f) then tru "ffiapp" else false (* ASK: we're calling functions effectful if they have effects when @@ -131,82 +130,188 @@ val effectfulMap = end +(* Boolean formula normalization. *) + +datatype normalForm = Cnf | Dnf + +datatype 'atom formula = + Atom of 'atom + | Negate of 'atom formula + | Combo of normalForm * 'atom formula list + +val flipNf = fn Cnf => Dnf | Dnf => Cnf + +fun bind xs f = List.concat (map f xs) + +val rec cartesianProduct : 'a list list -> 'a list list = + fn [] => [[]] + | (xs :: xss) => bind (cartesianProduct xss) + (fn ys => bind xs (fn x => [x :: ys])) + +fun normalize (negate : 'atom -> 'atom) (norm : normalForm) = + fn Atom x => [[x]] + | Negate f => map (map negate) (normalize negate (flipNf norm) f) + | Combo (n, fs) => + let + val fss = bind fs (normalize negate n) + in + if n = norm then fss else cartesianProduct fss + end + +fun mapFormula mf = + fn Atom x => Atom (mf x) + | Negate f => Negate (mapFormula mf f) + | Combo (n, fs) => Combo (n, map (mapFormula mf) fs) + + (* SQL analysis. *) -val useInjIfPossible = - fn SqConst prim => Inj (EPrim (Prim.String (Prim.Normal, Prim.toString prim)), - ErrorMsg.dummySpan) - | sqexp => sqexp +val rec chooseTwos : 'a list -> ('a * 'a) list = + fn [] => [] + | x :: ys => map (fn y => (x, y)) ys @ chooseTwos ys + +datatype atomExp = + QueryArg of int + | DmlRel of int + | Prim of Prim.t + | Field of string * string -fun equalities (canonicalTable : string -> string) : - sqexp -> ((string * string) * Mono.exp) list option = +structure AtomExpKey : ORD_KEY = struct + +type ord_key = atomExp + +val compare = + fn (QueryArg n1, QueryArg n2) => Int.compare (n1, n2) + | (QueryArg _, _) => LESS + | (_, QueryArg _) => GREATER + | (DmlRel n1, DmlRel n2) => Int.compare (n1, n2) + | (DmlRel _, _) => LESS + | (_, DmlRel _) => GREATER + | (Prim p1, Prim p2) => Prim.compare (p1, p2) + | (Prim _, _) => LESS + | (_, Prim _) => GREATER + | (Field (t1, f1), Field (t2, f2)) => String.compare (t1 ^ "." ^ f1, t2 ^ "." ^ f2) + +end + +structure UF = UnionFindFn(AtomExpKey) + +fun conflictMaps (fQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula, + fDml : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula) = let - val rec eqs = - fn Binop (Exps f, e1, e2) => - (* TODO: use a custom datatype in Exps instead of a function. 
*) - (case f (Var 1, Var 2) of - Reln (Eq, [Var 1, Var 2]) => - let - val (e1', e2') = (useInjIfPossible e1, useInjIfPossible e2) - in - case (e1', e2') of - (Field (t, f), Inj i) => SOME [((canonicalTable t, f), i)] - | (Inj i, Field (t, f)) => SOME [((canonicalTable t, f), i)] - | _ => NONE - end - | _ => NONE) - | Binop (Props f, e1, e2) => - (* TODO: use a custom datatype in Props instead of a function. *) - (case f (True, False) of - And (True, False) => - (case (eqs e1, eqs e2) of - (SOME eqs1, SOME eqs2) => SOME (eqs1 @ eqs2) - | _ => NONE) - | _ => NONE) + val toKnownEquality = + (* [NONE] here means unkown. Anything that isn't a comparison between + two knowns shouldn't be used, and simply dropping unused terms is + okay in disjunctive normal form. *) + fn (Sql.Eq, SOME e1, SOME e2) => SOME (e1, e2) | _ => NONE + val equivClasses : (Sql.cmp * atomExp option * atomExp option) list -> atomExp list list = + UF.classes + o List.foldl UF.union' UF.empty + o List.mapPartial toKnownEquality + fun addToEqs (eqs, n, e) = + case IM.find (eqs, n) of + (* Comparing to a constant seems better? *) + SOME (EPrim _) => eqs + | _ => IM.insert (eqs, n, e) + val accumulateEqs = + (* [NONE] means we have a contradiction. *) + fn (_, NONE) => NONE + | ((Prim p1, Prim p2), eqso) => + (case Prim.compare (p1, p2) of + EQUAL => eqso + | _ => NONE) + | ((QueryArg n, Prim p), SOME eqs) => SOME (addToEqs (eqs, n, EPrim p)) + | ((QueryArg n, DmlRel r), SOME eqs) => SOME (addToEqs (eqs, n, ERel r)) + | ((Prim p, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, EPrim p)) + | ((DmlRel r, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, ERel r)) + (* TODO: deal with equalities involving just [DmlRel]s and [Prim]s. *) + | (_, eqso) => eqso + val eqsOfClass : atomExp list -> Mono.exp' IM.map option = + List.foldl accumulateEqs (SOME IM.empty) + o chooseTwos + fun toAtomExps rel (cmp, e1, e2) = + let + val qa = + (* Here [NONE] means unkown. *) + fn Sql.SqConst p => SOME (Prim p) + | Sql.Field tf => SOME (Field tf) + | Sql.Inj (EPrim p, _) => SOME (Prim p) + | Sql.Inj (ERel n, _) => SOME (rel n) + (* We can't deal with anything else. *) + | _ => NONE + in + (cmp, qa e1, qa e2) + end + fun negateCmp (cmp, e1, e2) = + (case cmp of + Sql.Eq => Sql.Ne + | Sql.Ne => Sql.Eq + | Sql.Lt => Sql.Ge + | Sql.Le => Sql.Gt + | Sql.Gt => Sql.Le + | Sql.Ge => Sql.Lt, + e1, e2) + val markQuery = mapFormula (toAtomExps QueryArg) + val markDml = mapFormula (toAtomExps DmlRel) + val dnf = normalize negateCmp Dnf (Combo (Cnf, [markQuery fQuery, markDml fDml])) + (* If one of the terms in a conjunction leads to a contradiction, which + is represented by [NONE], drop the entire conjunction. *) + val sequenceOption = List.foldr (fn (SOME x, SOME xs) => SOME (x :: xs) | _ => NONE) + (SOME []) in - eqs + List.mapPartial (sequenceOption o map eqsOfClass o equivClasses) dnf end -val equalitiesQuery = - fn Query1 {From = tablePairs, Where = SOME exp, ...} => - equalities - (* If we have [SELECT ... FROM T AS T' ...], use T, not T'. *) - (fn t => - case List.find (fn (_, tAs) => t = tAs) tablePairs of - NONE => t - | SOME (tOrig, _) => tOrig) - exp - | Query1 {Where = NONE, ...} => SOME [] - | _ => NONE - -val equalitiesDml = - fn Insert (tab, eqs) => SOME (List.mapPartial - (fn (name, sqexp) => - case useInjIfPossible sqexp of - Inj e => SOME ((tab, name), e) - | _ => NONE) - eqs) - | Delete (tab, exp) => equalities (fn _ => tab) exp - (* TODO: examine the updated values and not just the way they're filtered. 
*) - (* For example, UPDATE foo SET Id = 9001 WHERE Id = 42 should update both the - Id = 42 and Id = 9001 cache entries. Could also think of it as doing a - Delete immediately followed by an Insert. *) - | Update (tab, _, exp) => equalities (fn _ => tab) exp +val rec sqexpToFormula = + fn Sql.SqTrue => Combo (Cnf, []) + | Sql.SqFalse => Combo (Dnf, []) + | Sql.SqNot e => Negate (sqexpToFormula e) + | Sql.Binop (Sql.RCmp c, e1, e2) => Atom (c, e1, e2) + | Sql.Binop (Sql.RLop l, p1, p2) => Combo (case l of Sql.And => Cnf | Sql.Or => Dnf, + [sqexpToFormula p1, sqexpToFormula p2]) + (* ASK: any other sqexps that can be props? *) + | _ => raise Match + +val rec queryToFormula = + fn Sql.Query1 {From = tablePairs, Where = NONE, ...} => Combo (Cnf, []) + | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => + let + fun renameString table = + case List.find (fn (_, t) => table = t) tablePairs of + NONE => table + | SOME (realTable, _) => realTable + val renameSqexp = + fn Sql.Field (table, field) => Sql.Field (renameString table, field) + | e => e + fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) + in + mapFormula renameAtom (sqexpToFormula e) + end + | Sql.Union (q1, q2) => Combo (Dnf, [queryToFormula q1, queryToFormula q2]) + +val rec dmlToFormula = + fn Sql.Insert (table, vals) => + Combo (Cnf, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) + | Sql.Delete (_, wher) => sqexpToFormula wher + (* TODO: refine formula for the vals part, which could take into account the wher part. *) + | Sql.Update (table, vals, wher) => Combo (Dnf, [dmlToFormula (Sql.Insert (table, vals)), + dmlToFormula (Sql.Delete (table, wher))]) val rec tablesQuery = - fn Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) - | Union (q1, q2) => SS.union (tablesQuery q1, tablesQuery q2) + fn Sql.Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) + | Sql.Union (q1, q2) => SS.union (tablesQuery q1, tablesQuery q2) val tableDml = - fn Insert (tab, _) => tab - | Delete (tab, _) => tab - | Update (tab, _, _) => tab + fn Sql.Insert (tab, _) => tab + | Sql.Delete (tab, _) => tab + | Sql.Update (tab, _, _) => tab (* Program instrumentation. *) fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), ErrorMsg.dummySpan) + val stringTyp = (TFfi ("Basis", "string"), ErrorMsg.dummySpan) val sequence = @@ -243,10 +348,10 @@ fun incRelsBound bound inc = val incRels = incRelsBound 0 -(* Filled in by instrumentQuery during Monoize, used during Sqlcache. *) +(* Filled in by instrumentQuery during [Monoize], used during [Sqlcache]. *) val urlifiedRel0s : Mono.exp IM.map ref = ref IM.empty -(* Used by Monoize. *) +(* Used by [Monoize]. *) val instrumentQuery = let val nextQuery = ref 0 @@ -260,9 +365,12 @@ val instrumentQuery = (ELet (varPrefix ^ Int.toString i, typ, query, (* Uses a dummy FFI call to keep the urlified expression around, which in turn keeps the declarations required for urlification safe from - MonoShake. The dummy call is removed during Sqlcache. *) - (* TODO: thread a Monoize.Fm.t through this module. *) - (ESeq ((EFfiApp ("Sqlcache", "dummy", [(urlifiedRel0, stringTyp)]), loc), + [MonoShake]. The dummy call is removed during [Sqlcache]. *) + (* TODO: thread a [Monoize.Fm.t] through this module. 
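A small illustration of the formula encoding defined above (an aside, not part of the patch): assuming the Sql structure and sqexpToFormula are in scope, a WHERE condition of the shape below, built directly as a Sql.sqexp rather than parsed from text, becomes a CNF combination of two comparison atoms. The binding name whereFormula is invented for the example.

    val whereFormula =
        sqexpToFormula
            (Sql.Binop (Sql.RLop Sql.And,
                        Sql.Binop (Sql.RCmp Sql.Eq,
                                   Sql.Field ("T", "Id"),
                                   Sql.SqConst (Prim.Int (Int64.fromInt 42))),
                        Sql.Binop (Sql.RCmp Sql.Eq,
                                   Sql.Field ("T", "Val"),
                                   Sql.SqConst (Prim.Int (Int64.fromInt 0)))))
    (* whereFormula =
         Combo (Cnf, [Atom (Sql.Eq, Sql.Field ("T", "Id"), SqConst 42),
                      Atom (Sql.Eq, Sql.Field ("T", "Val"), SqConst 0)])
       with the constants abbreviated. *)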
*) + (ESeq ((EFfiApp ("Sqlcache", + "dummy", + [(urlifiedRel0, stringTyp)]), + loc), (ERel 0, loc)), loc)), loc) @@ -272,18 +380,18 @@ val instrumentQuery = iq end -fun cacheWrap (query, i, urlifiedRel0, eqs) = +fun cacheWrap (query, i, urlifiedRel0, args) = case query of (EQuery {state = typ, ...}, _) => let - val () = ffiInfo := {index = i, params = length eqs} :: !ffiInfo + val () = ffiInfo := {index = i, params = length args} :: !ffiInfo val loc = ErrorMsg.dummySpan (* We ensure before this step that all arguments aren't effectful. by turning them into local variables as needed. *) - val args = map (fn (_, e) => (e, stringTyp)) eqs - val argsInc = map (fn (e, typ) => (incRels 1 e, typ)) args - val check = ffiAppCache ("check", i, args) - val store = ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argsInc) + val argTyps = map (fn e => (e, stringTyp)) args + val argTypsInc = map (fn (e, typ) => (incRels 1 e, typ)) argTyps + val check = ffiAppCache ("check", i, argTyps) + val store = ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argTypsInc) val rel0 = (ERel 0, loc) in (ECase (check, @@ -315,18 +423,16 @@ fun addChecking file = letBody) => let val loc = ErrorMsg.dummySpan - val chunks = chunkify origQueryText + val chunks = Sql.chunkify origQueryText fun strcat (e1, e2) = (EStrcat (e1, e2), loc) val (newQueryText, newVariables) = (* Important that this is foldr (to oppose foldl below). *) List.foldr (fn (chunk, (qText, newVars)) => + (* Variable bound to the head of newBs will have the lowest index. *) case chunk of - Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) - | Exp (e as (ERel _, _)) => (strcat (e, qText), newVars) - | Exp (e as (ENamed _, _)) => (strcat (e, qText), newVars) - (* Head of newVars has lowest index. *) - | Exp e => + Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) + | Sql.Exp e => let val n = length newVars in @@ -335,12 +441,15 @@ fun addChecking file = so we increment indices by n. *) (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) end - | String s => (strcat (stringExp s, qText), newVars)) + | Sql.String s => (strcat (stringExp s, qText), newVars)) (stringExp "", []) chunks fun wrapLets e' = (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet ("sqlArgz", stringTyp, v, (e', loc))) e' newVariables + List.foldl (fn (v, e') => ELet ("sqlArgz", stringTyp, v, (e', loc))) + e' + newVariables + val numArgs = length newVariables (* Increment once for each new variable just made. *) val queryExp = incRels (length newVariables) (EQuery {query = newQueryText, @@ -352,6 +461,7 @@ fun addChecking file = queryLoc) val (EQuery {query = queryText, ...}, _) = queryExp (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)); *) + val args = List.tabulate (numArgs, fn n => (ERel n, loc)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE (* DEBUG: set first boolean argument to true to turn on printing. *) @@ -359,16 +469,15 @@ fun addChecking file = val attempt = (* Ziv misses Haskell's do notation.... 
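As a standalone illustration of the combinator style used in attempt (an aside; the two one-liners are repeated here so the snippet runs on its own): bind is Option.mapPartial with its arguments flipped, and guard cuts the whole chain off when a test fails, which is what lets the pipeline bail out to NONE at the first unparsable or unsafe step.

    fun bind x f = Option.mapPartial f x
    fun guard b x = if b then x else NONE
    val example : int option =
        guard true (
        bind (SOME 1) (fn a =>
        bind (SOME 2) (fn b =>
        SOME (a + b))))
    (* example = SOME 3; a false guard or a NONE anywhere gives NONE overall. *)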
*) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( - bind (parse query queryText) (fn queryParsed => + bind (Sql.parse Sql.query queryText) (fn queryParsed => bind (indexOfName v) (fn i => - bind (equalitiesQuery queryParsed) (fn eqs => bind (IM.find (!urlifiedRel0s, i)) (fn urlifiedRel0 => SOME (wrapLets (ELet (v, t, - cacheWrap (queryExp, i, urlifiedRel0, eqs), + cacheWrap (queryExp, i, urlifiedRel0, args), incRelsBound 1 (length newVariables) letBody)), SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, i)) queryInfo - (tablesQuery queryParsed))))))) + (tablesQuery queryParsed)))))) in case attempt of SOME pair => pair @@ -380,6 +489,22 @@ fun addChecking file = fileMapfold (fn exp => fn state => doExp state exp) file SIMM.empty end +fun invalidations (nQueryArgs, query, dml) = + let + val loc = ErrorMsg.dummySpan + val optionToExp = + fn NONE => (ENone stringTyp, loc) + | SOME e => (ESome (stringTyp, (e, loc)), loc) + fun eqsToInvalidation eqs = + let + fun inv n = if n < 0 then [] else optionToExp (IM.find (eqs, n)) :: inv (n - 1) + in + inv (nQueryArgs - 1) + end + in + map (map eqsToInvalidation) (conflictMaps (queryToFormula query, dmlToFormula dml)) + end + fun addFlushing (file, queryInfo) = let val allIndices : int list = SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] queryInfo @@ -388,7 +513,7 @@ fun addFlushing (file, queryInfo) = fn dmlExp as EDml (dmlText, _) => let val indices = - case parse dml dmlText of + case Sql.parse Sql.dml dmlText of SOME dmlParsed => SIMM.findList (queryInfo, tableDml dmlParsed) | NONE => allIndices in @@ -408,179 +533,4 @@ fun go file = file' end - -(* BEGIN OLD - -fun intExp (n, loc) = (EPrim (Prim.Int (Int64.fromInt n)), loc) -fun intTyp loc = (TFfi ("Basis", "int"), loc) -fun stringExp (s, loc) = (EPrim (Prim.String (Prim.Normal, s)), loc) - -fun boolPat (b, loc) = (PCon (Enum, - PConFfi {mod = "Basis", datatyp = "bool", arg = NONE, - con = if b then "True" else "False"}, - NONE), - loc) -fun boolTyp loc = (TFfi ("Basis", "int"), loc) - -fun ffiAppExp (module, func, index, args, loc) = - (EFfiApp (module, func ^ Int.toString index, args), loc) - -val sequence = - fn ((exp :: exps), loc) => - List.foldl (fn (exp, seq) => (ESeq (seq, exp), loc)) exp exps - | _ => raise Match - -fun antiguardUnit (cond, exp, loc) = - (ECase (cond, - [(boolPat (false, loc), exp), - (boolPat (true, loc), (ERecord [], loc))], - {disc = boolTyp loc, result = (TRecord [], loc)}), - loc) - -fun underAbs f (exp as (exp', loc)) = - case exp' of - EAbs (x, y, z, body) => (EAbs (x, y, z, underAbs f body), loc) - | _ => f exp - - -val rec tablesRead = - fn Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) - | Union (q1, q2) => SS.union (tablesRead q1, tablesRead q2) - -val tableWritten = - fn Insert (tab, _) => tab - | Delete (tab, _) => tab - | Update (tab, _, _) => tab - -fun tablesInExp' exp' = - let - val nothing = {read = SS.empty, written = SS.empty} - in - case exp' of - EQuery {query = e, ...} => - (case parse query e of - SOME q => {read = tablesRead q, written = SS.empty} - | NONE => nothing) - | EDml (e, _) => - (case parse dml e of - SOME q => {read = SS.empty, written = SS.singleton (tableWritten q)} - | NONE => nothing) - | _ => nothing - end - -val tablesInExp = - let - fun addTables (exp', {read, written}) = - let - val {read = r, written = w} = tablesInExp' exp' - in - {read = SS.union (r, read), written = SS.union (w, written)} - end - in - MonoUtil.Exp.fold {typ = #2, exp = addTables} - {read = SS.empty, written = 
SS.empty} - end - -fun addCacheCheck (index, exp) = - let - fun f (body as (_, loc)) = - let - val check = ffiAppExp ("Cache", "check", index, loc) - val store = ffiAppExp ("Cache", "store", index, loc) - in - antiguardUnit (check, sequence ([body, store], loc), loc) - end - in - underAbs f exp - end - -fun addCacheFlush (exp, tablesToIndices) = - let - fun addIndices (table, indices) = IS.union (indices, SIMM.find (tablesToIndices, table)) - fun f (body as (_, loc)) = - let - fun mapFfi func = List.map (fn i => ffiAppExp ("Cache", func, i, loc)) - val flushes = - IS.listItems (SS.foldr addIndices IS.empty (#written (tablesInExp body))) - in - sequence (mapFfi "flush" flushes @ [body] @ mapFfi "ready" flushes, loc) - end - in - underAbs f exp - end - -val handlerIndices = - let - val isUnit = - fn (TRecord [], _) => true - | _ => false - fun maybeAdd (d, soFar as {readers, writers}) = - case d of - DExport (Link ReadOnly, _, name, typs, typ, _) => - if List.all isUnit (typ::typs) - then {readers = IS.add (readers, name), writers = writers} - else soFar - | DExport (_, _, name, _, _, _) => (* Not read only. *) - {readers = readers, writers = IS.add (writers, name)} - | _ => soFar - in - MonoUtil.File.fold {typ = #2, exp = #2, decl = maybeAdd} - {readers = IS.empty, writers = IS.empty} - end - -fun fileFoldMapiSelected f init (file, indices) = - let - fun doExp (original as ((a, index, b, exp, c), state)) = - if IS.member (indices, index) - then let val (newExp, newState) = f (index, exp, state) - in ((a, index, b, newExp, c), newState) end - else original - fun doDecl decl state = - let - val result = - case decl of - DVal x => - let val (y, newState) = doExp (x, state) - in (DVal y, newState) end - | DValRec xs => - let val (ys, newState) = ListUtil.foldlMap doExp state xs - in (DValRec ys, newState) end - | _ => (decl, state) - in - Search.Continue result - end - fun nada x y = Search.Continue (x, y) - in - case MonoUtil.File.mapfold {typ = nada, exp = nada, decl = doDecl} file init of - Search.Continue x => x - | _ => raise Match (* Should never happen. *) - end - -fun fileMapSelected f = #1 o fileFoldMapiSelected (fn (_, x, _) => (f x, ())) () - -val addCacheChecking = - let - fun f (index, exp, tablesToIndices) = - (addCacheCheck (index, exp), - SS.foldr (fn (table, tsToIs) => SIMM.insert (tsToIs, table, index)) - tablesToIndices - (#read (tablesInExp exp))) - in - fileFoldMapiSelected f (SM.empty) - end - -fun addCacheFlushing (file, tablesToIndices, writers) = - fileMapSelected (fn exp => addCacheFlush (exp, tablesToIndices)) (file, writers) - -fun go file = - let - val {readers, writers} = handlerIndices file - val (fileWithChecks, tablesToIndices) = addCacheChecking (file, readers) - in - ffiIndices := IS.listItems readers; - addCacheFlushing (fileWithChecks, tablesToIndices, writers) - end - -END OLD *) - end diff --git a/src/union_find_fn.sml b/src/union_find_fn.sml new file mode 100644 index 00000000..42b2d4d7 --- /dev/null +++ b/src/union_find_fn.sml @@ -0,0 +1,47 @@ +functor UnionFindFn(K : ORD_KEY) = struct + +structure M = BinaryMapFn(K) +structure S = BinarySetFn(K) + +datatype entry = + Set of S.set + | Pointer of K.ord_key + +(* First map is the union-find tree, second stores equivalence classes. 
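A usage sketch for this functor (an aside, assuming the complete functor in this file is in scope; the structure name IntUF and the integer keys are invented for the example): repeated unions build the two maps, and classes reads the equivalence classes back out.

    structure IntUF = UnionFindFn(struct
                                      type ord_key = int
                                      val compare = Int.compare
                                  end)
    val uf = IntUF.union' ((1, 2), IntUF.union' ((2, 3), IntUF.union' ((4, 5), IntUF.empty)))
    val cs = IntUF.classes uf
    (* cs = [[1, 2, 3], [4, 5]]: each class is listed once, under its representative. *)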
*) +type unionFind = entry M.map ref * S.set M.map + +val empty : unionFind = (ref M.empty, M.empty) + +fun findPair (uf, x) = + case M.find (!uf, x) of + NONE => (S.singleton x, x) + | SOME (Set set) => (set, x) + | SOME (Pointer parent) => + let + val (set, rep) = findPair (uf, parent) + in + uf := M.insert (!uf, x, Pointer rep); + (set, rep) + end + +fun find ((uf, _), x) = (S.listItems o #1 o findPair) (uf, x) + +fun classes (_, cs) = (map S.listItems o M.listItems) cs + +fun union ((uf, cs), x, y) = + let + val (xSet, xRep) = findPair (uf, x) + val (ySet, yRep) = findPair (uf, y) + val xySet = S.union (xSet, ySet) + in + (ref (M.insert (M.insert (!uf, yRep, Pointer xRep), + xRep, Set xySet)), + M.insert (case M.find (cs, yRep) of + NONE => cs + | SOME _ => #1 (M.remove (cs, yRep)), + xRep, xySet)) + end + +fun union' ((x, y), uf) = union (uf, x, y) + +end -- cgit v1.2.3 From a747e57a19be5a2bf0166efd86547b5d851a5902 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 11 Nov 2014 04:25:20 -0500 Subject: More invalidation progress. --- caching-tests/test.sql | 6 +- caching-tests/test.ur | 5 +- src/mono_util.sig | 4 +- src/mono_util.sml | 6 +- src/sqlcache.sml | 208 ++++++++++++++++++++++++++++++++++++------------- 5 files changed, 166 insertions(+), 63 deletions(-) (limited to 'src') diff --git a/caching-tests/test.sql b/caching-tests/test.sql index efa271ec..7ade7278 100644 --- a/caching-tests/test.sql +++ b/caching-tests/test.sql @@ -1,14 +1,14 @@ -CREATE TABLE uw_Test_foo01(uw_id integer NOT NULL, uw_bar text NOT NULL, +CREATE TABLE uw_Test_foo01(uw_id int8 NOT NULL, uw_bar text NOT NULL, PRIMARY KEY (uw_id) ); - CREATE TABLE uw_Test_foo10(uw_id integer NOT NULL, uw_bar text NOT NULL, + CREATE TABLE uw_Test_foo10(uw_id int8 NOT NULL, uw_bar text NOT NULL, PRIMARY KEY (uw_id) ); - CREATE TABLE uw_Test_tab(uw_id integer NOT NULL, uw_val integer NOT NULL, + CREATE TABLE uw_Test_tab(uw_id int8 NOT NULL, uw_val int8 NOT NULL, PRIMARY KEY (uw_id) ); diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 06ed456c..931612bc 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -3,7 +3,7 @@ table foo10 : {Id : int, Bar : string} PRIMARY KEY Id table tab : {Id : int, Val : int} PRIMARY KEY Id fun cache01 () = - res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); + res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 43); return Reading 1. {case res of @@ -33,7 +33,8 @@ fun cache11 () = fun flush01 () = - dml (UPDATE foo01 SET Bar = "baz01" WHERE Id = 42); + dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz01")); + (* dml (UPDATE foo01 SET Bar = "baz01" WHERE Id = 42); *) return Flushed 1! diff --git a/src/mono_util.sig b/src/mono_util.sig index da8b2e20..5c078a77 100644 --- a/src/mono_util.sig +++ b/src/mono_util.sig @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -68,7 +68,7 @@ structure Exp : sig val fold : {typ : Mono.typ' * 'state -> 'state, exp : Mono.exp' * 'state -> 'state} -> 'state -> Mono.exp -> 'state - + val exists : {typ : Mono.typ' -> bool, exp : Mono.exp' -> bool} -> Mono.exp -> bool diff --git a/src/mono_util.sml b/src/mono_util.sml index cc531625..fd80c64f 100644 --- a/src/mono_util.sml +++ b/src/mono_util.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -281,7 +281,7 @@ fun mapfoldB {typ = fc, exp = fe, bind} = S.map2 (mft t, fn t' => (ERedirect (e', t'), loc))) - + | EStrcat (e1, e2) => S.bind2 (mfe ctx e1, fn e1' => @@ -624,7 +624,7 @@ fun mapfoldB {typ = fc, exp = fe, decl = fd, bind} = (x, n, t', e', s))) in mfd - end + end fun mapfold {typ = fc, exp = fe, decl = fd} = mapfoldB {typ = fc, diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 59800ca3..095a1474 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -148,21 +148,40 @@ val rec cartesianProduct : 'a list list -> 'a list list = | (xs :: xss) => bind (cartesianProduct xss) (fn ys => bind xs (fn x => [x :: ys])) -fun normalize (negate : 'atom -> 'atom) (norm : normalForm) = +(* Pushes all negation to the atoms.*) +fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = + fn Atom x => Atom (if negating then negate x else x) + | Negate f => pushNegate negate (not negating) f + | Combo (n, fs) => Combo (if negating then flipNf n else n, map (pushNegate negate negating) fs) + +val rec flatten = + fn Combo (n, fs) => + Combo (n, List.foldr (fn (f, acc) => + case f of + Combo (n', fs') => if n = n' then fs' @ acc else f :: acc + | _ => f :: acc) + [] + (map flatten fs)) + | f => f + +fun normalize' (negate : 'atom -> 'atom) (norm : normalForm) = fn Atom x => [[x]] - | Negate f => map (map negate) (normalize negate (flipNf norm) f) + | Negate f => map (map negate) (normalize' negate (flipNf norm) f) | Combo (n, fs) => let - val fss = bind fs (normalize negate n) + val fss = bind fs (normalize' negate n) in if n = norm then fss else cartesianProduct fss end -fun mapFormula mf = - fn Atom x => Atom (mf x) - | Negate f => Negate (mapFormula mf f) - | Combo (n, fs) => Combo (n, map (mapFormula mf) fs) +fun normalize negate norm = normalize' negate norm o flatten o pushNegate negate false +fun mapFormulaSigned positive mf = + fn Atom x => Atom (mf (positive, x)) + | Negate f => Negate (mapFormulaSigned (not positive) mf f) + | Combo (n, fs) => Combo (n, map (mapFormulaSigned positive mf) fs) + +fun mapFormula mf = mapFormulaSigned true (fn (_, x) => mf x) (* SQL analysis. 
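A worked example of the normalization above (an aside, assuming the formula datatype and normalize from this file are in scope; atoms are plain strings and negateStr is invented for the example): pushing the negation inward and then distributing yields the expected two-clause DNF.

    fun negateStr s = "not " ^ s
    val f = Negate (Combo (Cnf, [Atom "x", Combo (Dnf, [Atom "y", Atom "z"])]))
    val clauses = normalize negateStr Dnf f
    (* clauses = [["not x"], ["not y", "not z"]], i.e.
       ~(x /\ (y \/ z)) is ~x \/ (~y /\ ~z). *)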
*) @@ -176,6 +195,17 @@ datatype atomExp = | Prim of Prim.t | Field of string * string +val equalAtomExp = + let + val isEqual = fn EQUAL => true | _ => false + in + fn (QueryArg n1, QueryArg n2) => n1 = n2 + | (DmlRel n1, DmlRel n2) => n1 = n2 + | (Prim p1, Prim p2) => isEqual (Prim.compare (p1, p2)) + | (Field (t1, f1), Field (t2, f2)) => isEqual (String.compare (t1 ^ "." ^ f1, t2 ^ "." ^ f2)) + | _ => false + end + structure AtomExpKey : ORD_KEY = struct type ord_key = atomExp @@ -196,9 +226,10 @@ end structure UF = UnionFindFn(AtomExpKey) -fun conflictMaps (fQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula, - fDml : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula) = - let +(* val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) +(* * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) +(* -> Mono.exp' IM.map list = *) +(* let *) val toKnownEquality = (* [NONE] here means unkown. Anything that isn't a comparison between two knowns shouldn't be used, and simply dropping unused terms is @@ -212,7 +243,7 @@ fun conflictMaps (fQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula, fun addToEqs (eqs, n, e) = case IM.find (eqs, n) of (* Comparing to a constant seems better? *) - SOME (EPrim _) => eqs + SOME (Prim _) => eqs | _ => IM.insert (eqs, n, e) val accumulateEqs = (* [NONE] means we have a contradiction. *) @@ -221,13 +252,13 @@ fun conflictMaps (fQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula, (case Prim.compare (p1, p2) of EQUAL => eqso | _ => NONE) - | ((QueryArg n, Prim p), SOME eqs) => SOME (addToEqs (eqs, n, EPrim p)) - | ((QueryArg n, DmlRel r), SOME eqs) => SOME (addToEqs (eqs, n, ERel r)) - | ((Prim p, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, EPrim p)) - | ((DmlRel r, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, ERel r)) + | ((QueryArg n, Prim p), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) + | ((QueryArg n, DmlRel r), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) + | ((Prim p, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) + | ((DmlRel r, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) (* TODO: deal with equalities involving just [DmlRel]s and [Prim]s. *) | (_, eqso) => eqso - val eqsOfClass : atomExp list -> Mono.exp' IM.map option = + val eqsOfClass : atomExp list -> atomExp IM.map option = List.foldl accumulateEqs (SOME IM.empty) o chooseTwos fun toAtomExps rel (cmp, e1, e2) = @@ -252,16 +283,26 @@ fun conflictMaps (fQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula, | Sql.Gt => Sql.Le | Sql.Ge => Sql.Lt, e1, e2) - val markQuery = mapFormula (toAtomExps QueryArg) - val markDml = mapFormula (toAtomExps DmlRel) - val dnf = normalize negateCmp Dnf (Combo (Cnf, [markQuery fQuery, markDml fDml])) - (* If one of the terms in a conjunction leads to a contradiction, which - is represented by [NONE], drop the entire conjunction. *) - val sequenceOption = List.foldr (fn (SOME x, SOME xs) => SOME (x :: xs) | _ => NONE) - (SOME []) - in - List.mapPartial (sequenceOption o map eqsOfClass o equivClasses) dnf - end + val markQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> + (Sql.cmp * atomExp option * atomExp option) formula = + mapFormula (toAtomExps QueryArg) + val markDml : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> + (Sql.cmp * atomExp option * atomExp option) formula = + mapFormula (toAtomExps DmlRel) + (* No eqs should have key conflicts because no variable is in two + equivalence classes, so the [#1] can be anything. 
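A worked example of the two steps above (an aside, assuming the surrounding definitions are in scope; the query, the DML and the binding names are hypothetical): for a cached query ending in WHERE t.Id = {[x]} and a DML ending in WHERE Id = 42, one conjunct of the combined formula carries the two equalities below, and they collapse to a single binding for query argument 0.

    val conjunct =
        [(Sql.Eq, SOME (QueryArg 0), SOME (Field ("T", "Id"))),
         (Sql.Eq, SOME (Field ("T", "Id")),
          SOME (Prim (Prim.Int (Int64.fromInt 42))))]
    val eqClasses = equivClasses conjunct
    (* eqClasses = [[QueryArg 0, Prim 42, Field ("T", "Id")]]: one class. *)
    val eqs = map eqsOfClass eqClasses
    (* eqs = [SOME m] where m maps query argument 0 to Prim 42, so only the
       cache entry keyed at 42 needs to be flushed for this DML. *)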
*) + val mergeEqs : (atomExp IntBinaryMap.map option list + -> atomExp IntBinaryMap.map option) = + List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) + (SOME IM.empty) + fun dnf (fQuery, fDml) = + normalize negateCmp Dnf (Combo (Cnf, [markQuery fQuery, markDml fDml])) + (* in *) + val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula + * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula + -> atomExp IM.map list = + List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf + (* end *) val rec sqexpToFormula = fn Sql.SqTrue => Combo (Cnf, []) @@ -273,9 +314,7 @@ val rec sqexpToFormula = (* ASK: any other sqexps that can be props? *) | _ => raise Match -val rec queryToFormula = - fn Sql.Query1 {From = tablePairs, Where = NONE, ...} => Combo (Cnf, []) - | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => +fun renameTables tablePairs = let fun renameString table = case List.find (fn (_, t) => table = t) tablePairs of @@ -284,19 +323,47 @@ val rec queryToFormula = val renameSqexp = fn Sql.Field (table, field) => Sql.Field (renameString table, field) | e => e - fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) + fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) in - mapFormula renameAtom (sqexpToFormula e) + mapFormula renameAtom end + +val rec queryToFormula = + fn Sql.Query1 {Where = NONE, ...} => Combo (Cnf, []) + | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => + renameTables tablePairs (sqexpToFormula e) | Sql.Union (q1, q2) => Combo (Dnf, [queryToFormula q1, queryToFormula q2]) -val rec dmlToFormula = - fn Sql.Insert (table, vals) => +fun valsToFormula (table, vals) = Combo (Cnf, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) - | Sql.Delete (_, wher) => sqexpToFormula wher + +val rec dmlToFormula = + fn Sql.Insert tableVals => valsToFormula tableVals + | Sql.Delete (table, wher) => renameTables [(table, "T")] (sqexpToFormula wher) (* TODO: refine formula for the vals part, which could take into account the wher part. 
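An example of the vals part this TODO refers to (an aside, assuming valsToFormula and the Sql structure are in scope; the table and field names are made up): an INSERT's value list is described as a conjunction of field equations, and the same formula is what dmlToFormula produces for the whole INSERT.

    val insertedRow =
        valsToFormula ("t", [("Id", Sql.SqConst (Prim.Int (Int64.fromInt 42))),
                             ("Val", Sql.SqConst (Prim.Int (Int64.fromInt 0)))])
    (* insertedRow = Combo (Cnf, [Atom (Sql.Eq, Sql.Field ("t", "Id"), SqConst 42),
                                  Atom (Sql.Eq, Sql.Field ("t", "Val"), SqConst 0)]),
       with constants abbreviated; this matches dmlToFormula (Sql.Insert ("t", ...))
       for INSERT INTO t (Id, Val) VALUES (42, 0). *)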
*) - | Sql.Update (table, vals, wher) => Combo (Dnf, [dmlToFormula (Sql.Insert (table, vals)), - dmlToFormula (Sql.Delete (table, wher))]) + | Sql.Update (table, vals, wher) => + let + val f = sqexpToFormula wher + fun update (positive, a) = + let + fun updateIfNecessary field = + case List.find (fn (f, _) => field = f) vals of + SOME (_, v) => (if positive then Sql.Eq else Sql.Ne, + Sql.Field (table, field), + v) + | NONE => a + in + case a of + (_, Sql.Field (_, field), _) => updateIfNecessary field + | (_, _, Sql.Field (_, field)) => updateIfNecessary field + | _ => a + end + in + renameTables [(table, "T")] + (Combo (Dnf, [f, + Combo (Cnf, [valsToFormula (table, vals), + mapFormulaSigned true update f])])) + end val rec tablesQuery = fn Sql.Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) @@ -416,7 +483,7 @@ fun fileMap doExp file = #1 (fileMapfold (fn x => fn _ => (doExp x, ())) file () fun addChecking file = let - fun doExp queryInfo = + fun doExp (queryInfo as (tableToIndices, indexToQuery)) = fn e' as ELet (v, t, queryExp' as (EQuery {query = origQueryText, initial, body, state, tables, exps}, queryLoc), @@ -460,7 +527,7 @@ fun addChecking file = exps = exps}, queryLoc) val (EQuery {query = queryText, ...}, _) = queryExp - (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)); *) + val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)); val args = List.tabulate (numArgs, fn n => (ERel n, loc)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE @@ -470,14 +537,15 @@ fun addChecking file = (* Ziv misses Haskell's do notation.... *) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( bind (Sql.parse Sql.query queryText) (fn queryParsed => - bind (indexOfName v) (fn i => - bind (IM.find (!urlifiedRel0s, i)) (fn urlifiedRel0 => + bind (indexOfName v) (fn index => + bind (IM.find (!urlifiedRel0s, index)) (fn urlifiedRel0 => SOME (wrapLets (ELet (v, t, - cacheWrap (queryExp, i, urlifiedRel0, args), + cacheWrap (queryExp, index, urlifiedRel0, args), incRelsBound 1 (length newVariables) letBody)), - SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, i)) - queryInfo - (tablesQuery queryParsed)))))) + (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) + tableToIndices + (tablesQuery queryParsed), + IM.insert (indexToQuery, index, (queryParsed, numArgs)))))))) in case attempt of SOME pair => pair @@ -486,35 +554,69 @@ fun addChecking file = | ESeq ((EFfiApp ("Sqlcache", "dummy", _), _), (e', _)) => (e', queryInfo) | e' => (e', queryInfo) in - fileMapfold (fn exp => fn state => doExp state exp) file SIMM.empty + fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty) end +val gunk' : (((Sql.cmp * Sql.sqexp * Sql.sqexp) formula) + * ((Sql.cmp * Sql.sqexp * Sql.sqexp) formula)) list ref = ref [] + fun invalidations (nQueryArgs, query, dml) = let val loc = ErrorMsg.dummySpan - val optionToExp = + val optionAtomExpToExp = fn NONE => (ENone stringTyp, loc) - | SOME e => (ESome (stringTyp, (e, loc)), loc) + | SOME e => (ESome (stringTyp, + (case e of + DmlRel n => ERel n + | Prim p => EPrim p + (* TODO: make new type containing only these two. 
*) + | _ => raise Match, + loc)), + loc) fun eqsToInvalidation eqs = let - fun inv n = if n < 0 then [] else optionToExp (IM.find (eqs, n)) :: inv (n - 1) + fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) in inv (nQueryArgs - 1) end + (* *) + val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = + fn ([], []) => true + | (NONE :: xs, _ :: ys) => madeRedundantBy (xs, ys) + | (SOME x :: xs, SOME y :: ys) => equalAtomExp (x, y) andalso madeRedundantBy (xs, ys) + | _ => false + fun removeRedundant' (xss, yss) = + case xss of + [] => yss + | xs :: xss' => + removeRedundant' (xss', + if List.exists (fn ys => madeRedundantBy (xs, ys)) (xss' @ yss) + then yss + else xs :: yss) + fun removeRedundant xss = removeRedundant' (xss, []) + val eqss = conflictMaps (queryToFormula query, dmlToFormula dml) in - map (map eqsToInvalidation) (conflictMaps (queryToFormula query, dmlToFormula dml)) + gunk' := (queryToFormula query, dmlToFormula dml) :: !gunk'; + (map (map optionAtomExpToExp) o removeRedundant o map eqsToInvalidation) eqss end -fun addFlushing (file, queryInfo) = +val gunk : Mono.exp list list list ref = ref [] + +fun addFlushing (file, queryInfo as (tableToIndices, indexToQuery)) = let - val allIndices : int list = SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] queryInfo - fun flushes indices = map (fn i => ffiAppCache' ("flush", i, [])) indices + val allIndices = SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] tableToIndices + val flushes = map (fn i => ffiAppCache' ("flush", i, [])) val doExp = fn dmlExp as EDml (dmlText, _) => let val indices = case Sql.parse Sql.dml dmlText of - SOME dmlParsed => SIMM.findList (queryInfo, tableDml dmlParsed) + SOME dmlParsed => + map (fn i => ((case IM.find (indexToQuery, i) of + NONE => () + | SOME (queryParsed, numArgs) => + gunk := invalidations (numArgs, queryParsed, dmlParsed) :: !gunk); + i)) (SIMM.findList (tableToIndices, tableDml dmlParsed)) | NONE => allIndices in sequence (flushes indices @ [dmlExp]) -- cgit v1.2.3 From a413fc1a42bf0fbee27c2f22cc8e9ca9b17b9edf Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 24 Nov 2014 20:41:24 -0500 Subject: Add interface to UnionFind. --- src/sqlcache.sml | 1 + src/union_find_fn.sml | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 095a1474..d8169926 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -341,6 +341,7 @@ val rec dmlToFormula = fn Sql.Insert tableVals => valsToFormula tableVals | Sql.Delete (table, wher) => renameTables [(table, "T")] (sqexpToFormula wher) (* TODO: refine formula for the vals part, which could take into account the wher part. *) + (* TODO: use pushNegate instead of mapFormulaSigned? 
*) | Sql.Update (table, vals, wher) => let val f = sqexpToFormula wher diff --git a/src/union_find_fn.sml b/src/union_find_fn.sml index 42b2d4d7..e6f8d9bf 100644 --- a/src/union_find_fn.sml +++ b/src/union_find_fn.sml @@ -1,4 +1,10 @@ -functor UnionFindFn(K : ORD_KEY) = struct +functor UnionFindFn(K : ORD_KEY) :> sig + type unionFind + val empty : unionFind + val union : unionFind * K.ord_key * K.ord_key -> unionFind + val union' : (K.ord_key * K.ord_key) * unionFind -> unionFind + val classes : unionFind -> K.ord_key list list +end = struct structure M = BinaryMapFn(K) structure S = BinarySetFn(K) -- cgit v1.2.3 From 476f12674420391e24afd1846e176eabe550d36c Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sat, 29 Nov 2014 03:37:59 -0500 Subject: Basic field-resolution invalidation. --- caching-tests/test.db | Bin 5120 -> 0 bytes caching-tests/test.sql | 16 ---- caching-tests/test.ur | 66 ++++++++-------- caching-tests/test.urs | 8 +- src/cjr_print.sml | 28 +++++-- src/cjrize.sml | 10 +-- src/iflow.sml | 10 ++- src/jscomp.sml | 19 ++--- src/mono.sml | 7 +- src/mono_opt.sml | 25 +++--- src/mono_print.sml | 8 +- src/mono_util.sml | 23 +++--- src/monoize.sig | 2 + src/monoize.sml | 38 +++++---- src/sqlcache.sml | 211 +++++++++++++++++++++++++++---------------------- src/urweb.lex | 14 ++-- 16 files changed, 266 insertions(+), 219 deletions(-) delete mode 100644 caching-tests/test.db delete mode 100644 caching-tests/test.sql (limited to 'src') diff --git a/caching-tests/test.db b/caching-tests/test.db deleted file mode 100644 index a4661341..00000000 Binary files a/caching-tests/test.db and /dev/null differ diff --git a/caching-tests/test.sql b/caching-tests/test.sql deleted file mode 100644 index 7ade7278..00000000 --- a/caching-tests/test.sql +++ /dev/null @@ -1,16 +0,0 @@ -CREATE TABLE uw_Test_foo01(uw_id int8 NOT NULL, uw_bar text NOT NULL, - PRIMARY KEY (uw_id) - - ); - - CREATE TABLE uw_Test_foo10(uw_id int8 NOT NULL, uw_bar text NOT NULL, - PRIMARY KEY (uw_id) - - ); - - CREATE TABLE uw_Test_tab(uw_id int8 NOT NULL, uw_val int8 NOT NULL, - PRIMARY KEY (uw_id) - - ); - - \ No newline at end of file diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 931612bc..2722bcdc 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -11,26 +11,26 @@ fun cache01 () = | Some row => {[row.Foo01.Bar]}} -fun cache10 () = - res <- queryX (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42) - (fn row => {[row.Foo10.Bar]}); - return - Reading 2. - {res} - +(* fun cache10 () = *) +(* res <- queryX (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42) *) +(* (fn row => {[row.Foo10.Bar]}); *) +(* return *) +(* Reading 2. *) +(* {res} *) +(* *) -fun cache11 () = - res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); - bla <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); - return - Reading 1 and 2. - {case res of - None => ? - | Some row => {[row.Foo01.Bar]}} - {case bla of - None => ? - | Some row => {[row.Foo10.Bar]}} - +(* fun cache11 () = *) +(* res <- oneOrNoRows (SELECT foo01.Bar FROM foo01 WHERE foo01.Id = 42); *) +(* bla <- oneOrNoRows (SELECT foo10.Bar FROM foo10 WHERE foo10.Id = 42); *) +(* return *) +(* Reading 1 and 2. *) +(* {case res of *) +(* None => ? *) +(* | Some row => {[row.Foo01.Bar]}} *) +(* {case bla of *) +(* None => ? *) +(* | Some row => {[row.Foo10.Bar]}} *) +(* *) fun flush01 () = dml (INSERT INTO foo01 (Id, Bar) VALUES (42, "baz01")); @@ -39,18 +39,18 @@ fun flush01 () = Flushed 1! 
-fun flush10 () = - dml (UPDATE foo10 SET Bar = "baz10" WHERE Id = 42); - return - Flushed 2! - +(* fun flush10 () = *) +(* dml (UPDATE foo10 SET Bar = "baz10" WHERE Id = 42); *) +(* return *) +(* Flushed 2! *) +(* *) -fun flush11 () = - dml (UPDATE foo01 SET Bar = "baz11" WHERE Id = 42); - dml (UPDATE foo10 SET Bar = "baz11" WHERE Id = 42); - return - Flushed 1 and 2! - +(* fun flush11 () = *) +(* dml (UPDATE foo01 SET Bar = "baz11" WHERE Id = 42); *) +(* dml (UPDATE foo10 SET Bar = "baz11" WHERE Id = 42); *) +(* return *) +(* Flushed 1 and 2! *) +(* *) fun cache id = res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[id]}); @@ -63,9 +63,9 @@ fun cache id = fun flush id = res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[id]}); - dml (case res of - None => (INSERT INTO tab (Id, Val) VALUES ({[id]}, 0)) - | Some row => (UPDATE tab SET Val = {[row.Tab.Val + 1]} WHERE Id = {[id]})); + (case res of + None => dml (INSERT INTO tab (Id, Val) VALUES ({[id]}, 0)) + | Some row => dml (UPDATE tab SET Val = {[row.Tab.Val + 1]} WHERE Id = {[id]})); return (* Flushed {[id]}! *) {case res of diff --git a/caching-tests/test.urs b/caching-tests/test.urs index ace4ba28..30bff733 100644 --- a/caching-tests/test.urs +++ b/caching-tests/test.urs @@ -1,8 +1,8 @@ val cache01 : unit -> transaction page -val cache10 : unit -> transaction page -val cache11 : unit -> transaction page +(* val cache10 : unit -> transaction page *) +(* val cache11 : unit -> transaction page *) val flush01 : unit -> transaction page -val flush10 : unit -> transaction page -val flush11 : unit -> transaction page +(* val flush10 : unit -> transaction page *) +(* val flush11 : unit -> transaction page *) val cache : int -> transaction page val flush : int -> transaction page diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 56310b81..81dfefaa 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3410,14 +3410,22 @@ fun p_file env (ds, ps) = fun paramRepeatInit itemi sep = if params = 0 then "" else sep ^ paramRepeat itemi sep val args = paramRepeatInit (fn p => "uw_Basis_string p" ^ p) ", " - val decls = paramRepeat (fn p => "uw_Basis_string param" ^ i ^ "_" ^ p ^ " = NULL;") "\n" + val decls = paramRepeat (fn p => "uw_Basis_string param" ^ i ^ "_" + ^ p ^ " = NULL;") + "\n" val sets = paramRepeat (fn p => "param" ^ i ^ "_" ^ p - ^ " = strdup(p" ^ p ^ ");") "\n" - val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") "\n" - (* Starting || makes logic easier when there are no parameters. *) + ^ " = strdup(p" ^ p ^ ");") + "\n" + val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") + "\n" val eqs = paramRepeatInit (fn p => "strcmp(param" ^ i ^ "_" ^ p ^ ", p" ^ p ^ ")") " || " + (* Using [!=] instead of [==] to mimic [strcmp]. 
*) + val eqsNull = paramRepeatInit (fn p => "(p" ^ p ^ " == NULL || " + ^ "!strcmp(param" ^ i ^ "_" + ^ p ^ ", p" ^ p ^ "))") + " && " in box [string "static char *cacheQuery", string i, string " = NULL;", @@ -3471,13 +3479,21 @@ fun p_file env (ds, ps) = newline, string "static uw_unit uw_Sqlcache_flush", string i, - string "(uw_context ctx) {\n free(cacheQuery", + string "(uw_context ctx", + string args, + string ") {\n if (cacheQuery", + string i, + string " != NULL", + string eqsNull, + string ") {\n free(cacheQuery", string i, string ");\n cacheQuery", string i, string " = NULL;\n puts(\"SQLCACHE: flushed ", string i, - string ".\");\n return uw_unit_v;\n };", + string ".\");}\n else { puts(\"SQLCACHE: keeping ", + string i, + string "\"); } return uw_unit_v;\n };", newline, newline] end) diff --git a/src/cjrize.sml b/src/cjrize.sml index 11174162..b20d6d22 100644 --- a/src/cjrize.sml +++ b/src/cjrize.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -431,7 +431,7 @@ fun cifyExp (eAll as (e, loc), sm) = | L.EClosure _ => (ErrorMsg.errorAt loc "Nested closure remains in code generation"; (dummye, sm)) - | L.EQuery {exps, tables, state, query, body, initial} => + | L.EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => let val (exps', sm) = ListUtil.foldlMap (fn ((x, t), sm) => let @@ -586,7 +586,7 @@ fun cifyDecl ((d, loc), sm) = let val (vis, sm) = ListUtil.foldlMap (fn ((x, n, t, e, _), sm) => - let + let val (t, sm) = cifyTyp (t, sm) fun unravel (tAll as (t, _), eAll as (e, _)) = @@ -601,7 +601,7 @@ fun cifyDecl ((d, loc), sm) = (ErrorMsg.errorAt loc "Function isn't explicit at code generation"; ([], tAll, eAll)) | _ => ([], tAll, eAll) - + val (args, ran, e) = unravel (t, e) val (e, sm) = cifyExp (e, sm) in @@ -610,7 +610,7 @@ fun cifyDecl ((d, loc), sm) = sm vis in (SOME (L'.DFunRec vis, loc), NONE, sm) - end + end | L.DExport (ek, s, n, ts, t, b) => let diff --git a/src/iflow.sml b/src/iflow.sml index f68d8f72..b8346baa 100644 --- a/src/iflow.sml +++ b/src/iflow.sml @@ -1870,14 +1870,15 @@ val namer = MonoUtil.File.map {typ = fn t => t, case e of EDml (e, fm) => nameSubexps (fn (_, e') => (EDml (e', fm), #2 e)) e - | EQuery {exps, tables, state, query, body, initial} => + | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => nameSubexps (fn (liftBy, e') => (EQuery {exps = exps, tables = tables, state = state, query = e', body = mliftExpInExp liftBy 2 body, - initial = mliftExpInExp liftBy 0 initial}, + initial = mliftExpInExp liftBy 0 initial, + sqlcacheInfo = sqlcacheInfo}, #2 query)) query | _ => e, decl = fn d => d} @@ -2070,11 +2071,12 @@ fun check (file : file) = | ESeq (e1, e2) => (ESeq (doExp env e1, doExp env e2), loc) | ELet (x, t, e1, e2) => (ELet (x, t, doExp env e1, doExp (Unknown :: env) e2), loc) | EClosure (n, es) => (EClosure (n, map (doExp env) es), loc) - | EQuery {exps, tables, state, query, body, initial} => + | EQuery {exps, tables, state, query, body, initial, 
sqlcacheInfo} => (EQuery {exps = exps, tables = tables, state = state, query = doExp env query, body = doExp (Unknown :: Unknown :: env) body, - initial = doExp env initial}, loc) + initial = doExp env initial, + sqlcacheInfo = sqlcacheInfo}, loc) | EDml (e1, mode) => (case parse dml e1 of NONE => () diff --git a/src/jscomp.sml b/src/jscomp.sml index 1a476739..a4ee95f0 100644 --- a/src/jscomp.sml +++ b/src/jscomp.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -195,7 +195,7 @@ fun process (file : file) = str loc "}"])], {disc = t, result = s}), loc) val body = (EAbs ("x", t, s, body), loc) - + val st = {decls = ("jsify", n', (TFun (t, s), loc), body, "jsify") :: #decls st, script = #script st, @@ -575,7 +575,7 @@ fun process (file : file) = val e = String.translate (fn #"'" => "\\'" | #"\\" => "\\\\" | ch => String.str ch) e - + val sc = "urfuncs[" ^ Int.toString n ^ "] = {c:\"t\",f:'" ^ e ^ "'};\n" in @@ -799,7 +799,7 @@ fun process (file : file) = | _ => default () in seek (e', [x]) - end + end | ECase (e', pes, _) => let @@ -1030,7 +1030,7 @@ fun process (file : file) = | ERel _ => (e, st) | ENamed _ => (e, st) | ECon (_, _, NONE) => (e, st) - | ECon (dk, pc, SOME e) => + | ECon (dk, pc, SOME e) => let val (e, st) = exp outer (e, st) in @@ -1082,7 +1082,7 @@ fun process (file : file) = in ((EBinop (bi, s, e1, e2), loc), st) end - + | ERecord xets => let val (xets, st) = ListUtil.foldlMap (fn ((x, e, t), st) => @@ -1176,7 +1176,7 @@ fun process (file : file) = ((EClosure (n, es), loc), st) end - | EQuery {exps, tables, state, query, body, initial} => + | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => let val row = exps @ map (fn (x, xts) => (x, (TRecord xts, loc))) tables val row = ListMergeSort.sort (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER) row @@ -1187,7 +1187,8 @@ fun process (file : file) = val (initial, st) = exp outer (initial, st) in ((EQuery {exps = exps, tables = tables, state = state, - query = query, body = body, initial = initial}, loc), st) + query = query, body = body, initial = initial, + sqlcacheInfo = sqlcacheInfo}, loc), st) end | EDml (e, mode) => let @@ -1257,7 +1258,7 @@ fun process (file : file) = in ((ESignalSource e, loc), st) end - + | EServerCall (e1, t, ef, fm) => let val (e1, st) = exp outer (e1, st) diff --git a/src/mono.sml b/src/mono.sml index 1e402e57..5185e48c 100644 --- a/src/mono.sml +++ b/src/mono.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -107,7 +107,8 @@ datatype exp' = state : typ, query : exp, (* exp of string type containing sql query *) body : exp, - initial : exp } + initial : exp, + sqlcacheInfo : exp } | EDml of exp * failure_mode | ENextval of exp | ESetval of exp * exp @@ -119,7 +120,7 @@ datatype exp' = | ESignalReturn of exp | ESignalBind of exp * exp | ESignalSource of exp - + | EServerCall of exp * typ * effect * failure_mode | ERecv of exp * typ | ESleep of exp diff --git a/src/mono_opt.sml b/src/mono_opt.sml index d1e5ce55..97f78d3d 100644 --- a/src/mono_opt.sml +++ b/src/mono_opt.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -166,7 +166,7 @@ fun exp e = e | EFfiApp ("Basis", "strcat", [(e1, _), (e2, _)]) => exp (EStrcat (e1, e2)) - + | EStrcat ((EPrim (Prim.String (Prim.Html, s1)), loc), (EPrim (Prim.String (Prim.Html, s2)), _)) => let val s = @@ -179,7 +179,7 @@ fun exp e = in EPrim (Prim.String (Prim.Html, s)) end - + | EStrcat ((EPrim (Prim.String (_, s1)), loc), (EPrim (Prim.String (_, s2)), _)) => EPrim (Prim.String (Prim.Normal, s1 ^ s2)) @@ -397,18 +397,20 @@ fun exp e = initial = (EPrim (Prim.String (k, "")), _), body = (EStrcat ((EPrim (Prim.String (_, s)), _), (EStrcat ((ERel 0, _), - e'), _)), _)}, loc) => + e'), _)), _), + sqlcacheInfo}, loc) => if (case k of Prim.Normal => s = "" | Prim.Html => CharVector.all Char.isSpace s) then EQuery {exps = exps, tables = tables, query = query, state = (TRecord [], loc), initial = (ERecord [], loc), - body = (optExp (EWrite e', loc), loc)} + body = (optExp (EWrite e', loc), loc), + sqlcacheInfo = Monoize.urlifiedUnit} else e | EWrite (EQuery {exps, tables, state, query, initial = (EPrim (Prim.String (_, "")), _), - body}, loc) => + body, sqlcacheInfo}, loc) => let fun passLets (depth, (e', _), lets) = case e' of @@ -423,7 +425,8 @@ fun exp e = EQuery {exps = exps, tables = tables, query = query, state = (TRecord [], loc), initial = (ERecord [], loc), - body = body} + body = body, + sqlcacheInfo = Monoize.urlifiedUnit} end else e @@ -532,7 +535,7 @@ fun exp e = else ENone (TFfi ("Basis", "string"), loc)) - | EFfiApp ("Basis", "checkString", [((EPrim (Prim.String (_, s)), loc), _)]) => + | EFfiApp ("Basis", "checkString", [((EPrim (Prim.String (_, s)), loc), _)]) => let fun uwify (cs, acc) = case cs of @@ -560,7 +563,7 @@ fun exp e = EPrim (Prim.String (Prim.Normal, s)) end - | EFfiApp ("Basis", "viewify", [((EPrim (Prim.String (_, s)), loc), _)]) => + | EFfiApp ("Basis", "viewify", [((EPrim (Prim.String (_, s)), loc), _)]) => let fun uwify (cs, acc) = case cs of @@ -585,7 +588,7 @@ 
fun exp e = EPrim (Prim.String (Prim.Normal, s)) end - | EFfiApp ("Basis", "unAs", [((EPrim (Prim.String (_, s)), _), _)]) => + | EFfiApp ("Basis", "unAs", [((EPrim (Prim.String (_, s)), _), _)]) => EPrim (Prim.String (Prim.Normal, unAs s)) | EFfiApp ("Basis", "unAs", [(e', _)]) => let @@ -620,7 +623,7 @@ fun exp e = EFfiApp ("Basis", "attrifyChar_w", [e]) | EBinop (_, "+", (EPrim (Prim.Int n1), _), (EPrim (Prim.Int n2), _)) => EPrim (Prim.Int (Int64.+ (n1, n2))) - + | _ => e and optExp e = #1 (U.Exp.map {typ = typ, exp = exp} e) diff --git a/src/mono_print.sml b/src/mono_print.sml index c81b362a..0ff51f37 100644 --- a/src/mono_print.sml +++ b/src/mono_print.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -310,7 +310,7 @@ fun p_exp' par env (e, _) = p_exp env e]) es, string ")"] - | EQuery {exps, tables, state, query, body, initial} => + | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => box [string "query[", p_list (fn (x, t) => box [string x, space, string ":", space, p_typ env t]) exps, string "] [", @@ -391,7 +391,7 @@ fun p_vali env (x, n, t, e, s) = string "__", string (Int.toString n)] else - string x + string x in box [xp, space, @@ -541,7 +541,7 @@ fun p_decl env (dAll as (d, _) : decl) = space, p_policy env p] | DOnError _ => string "ONERROR" - + fun p_file env (file, _) = let val (pds, _) = ListUtil.foldlMap (fn (d, env) => diff --git a/src/mono_util.sml b/src/mono_util.sml index fd80c64f..ba10ad32 100644 --- a/src/mono_util.sml +++ b/src/mono_util.sml @@ -314,7 +314,7 @@ fun mapfoldB {typ = fc, exp = fe, bind} = fn es' => (EClosure (n, es'), loc)) - | EQuery {exps, tables, state, query, body, initial} => + | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => S.bind2 (ListUtil.mapfold (fn (x, t) => S.map2 (mft t, fn t' => (x, t'))) exps, @@ -334,15 +334,20 @@ fun mapfoldB {typ = fc, exp = fe, bind} = RelE ("acc", dummyt))) body, fn body' => - S.map2 (mfe ctx initial, + (* ASK: is this the right thing to do? 
*) + S.bind2 (mfe ctx initial, fn initial' => - (EQuery {exps = exps', - tables = tables', - state = state', - query = query', - body = body', - initial = initial'}, - loc))))))) + S.map2 (mfe (bind (ctx, RelE ("queryResult", dummyt))) + sqlcacheInfo, + fn sqlcacheInfo' => + (EQuery {exps = exps', + tables = tables', + state = state', + query = query', + body = body', + initial = initial', + sqlcacheInfo = sqlcacheInfo}, + loc)))))))) | EDml (e, fm) => S.map2 (mfe ctx e, diff --git a/src/monoize.sig b/src/monoize.sig index 951db01b..549bf6ee 100644 --- a/src/monoize.sig +++ b/src/monoize.sig @@ -31,4 +31,6 @@ signature MONOIZE = sig val liftExpInExp : int -> Mono.exp -> Mono.exp + val urlifiedUnit : Mono.exp + end diff --git a/src/monoize.sml b/src/monoize.sml index 2d225813..5c314c54 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -681,6 +681,16 @@ fun fooifyExp fk env = val attrifyExp = fooifyExp Attr val urlifyExp = fooifyExp Url +val urlifiedUnit = + let + val loc = ErrorMsg.dummySpan + (* Urlifies [ERel 0] to match the [sqlcacheInfo] field of [EQuery]s. *) + val (urlified, _) = urlifyExp CoreEnv.empty (Fm.empty 0) + ((L'.ERel 0, loc), (L'.TRecord [], loc)) + in + urlified + end + datatype 'a failable_search = Found of 'a | NotFound @@ -1957,26 +1967,24 @@ fun monoExp (env, st, fm) (all as (e, loc)) = (L'.TFun (un, state), loc)), loc)), loc) - val body'' = (L'.EApp ( + val body' = (L'.EApp ( (L'.EApp ( (L'.EApp ((L'.ERel 4, loc), (L'.ERel 1, loc)), loc), (L'.ERel 0, loc)), loc), (L'.ERecord [], loc)), loc) - val body' = (L'.EQuery {exps = exps, - tables = tables, - state = state, - query = (L'.ERel 3, loc), - body = body'', - initial = (L'.ERel 1, loc)}, - loc) - val (body, fm) = if Settings.getSqlcache () then - let - val (urlifiedRel0, fm) = urlifyExp env fm ((L'.ERel 0, loc), state) - in - (Sqlcache.instrumentQuery (body', urlifiedRel0), fm) - end - else (body', fm) + val (urlifiedRel0, fm) = urlifyExp env fm ((L'.ERel 0, loc), state) + val body = (L'.EQuery {exps = exps, + tables = tables, + state = state, + query = (L'.ERel 3, loc), + body = body', + initial = (L'.ERel 1, loc), + sqlcacheInfo = urlifiedRel0}, + loc) + val body = if Settings.getSqlcache () + then Sqlcache.instrumentQuery (body, urlifiedRel0) + else body in ((L'.EAbs ("q", s, (L'.TFun (ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc)), loc), (L'.EAbs ("f", ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc), diff --git a/src/sqlcache.sml b/src/sqlcache.sml index d8169926..b555ca7a 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -176,12 +176,10 @@ fun normalize' (negate : 'atom -> 'atom) (norm : normalForm) = fun normalize negate norm = normalize' negate norm o flatten o pushNegate negate false -fun mapFormulaSigned positive mf = - fn Atom x => Atom (mf (positive, x)) - | Negate f => Negate (mapFormulaSigned (not positive) mf f) - | Combo (n, fs) => Combo (n, map (mapFormulaSigned positive mf) fs) - -fun mapFormula mf = mapFormulaSigned true (fn (_, x) => mf x) +fun mapFormula mf = + fn Atom x => Atom (mf x) + | Negate f => Negate (mapFormula mf f) + | Combo (n, fs) => Combo (n, map (mapFormula mf) fs) (* SQL analysis. 
*) @@ -225,11 +223,10 @@ val compare = end structure UF = UnionFindFn(AtomExpKey) - -(* val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) -(* * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) -(* -> Mono.exp' IM.map list = *) -(* let *) +val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula + * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula + -> atomExp IM.map list = + let val toKnownEquality = (* [NONE] here means unkown. Anything that isn't a comparison between two knowns shouldn't be used, and simply dropping unused terms is @@ -297,12 +294,12 @@ structure UF = UnionFindFn(AtomExpKey) (SOME IM.empty) fun dnf (fQuery, fDml) = normalize negateCmp Dnf (Combo (Cnf, [markQuery fQuery, markDml fDml])) - (* in *) - val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula - * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula - -> atomExp IM.map list = - List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf - (* end *) + in + (* val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) + (* * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) + (* -> atomExp IM.map list = *) + List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf + end val rec sqexpToFormula = fn Sql.SqTrue => Combo (Cnf, []) @@ -338,32 +335,21 @@ fun valsToFormula (table, vals) = Combo (Cnf, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) val rec dmlToFormula = - fn Sql.Insert tableVals => valsToFormula tableVals + fn Sql.Insert (table, vals) => valsToFormula (table, vals) | Sql.Delete (table, wher) => renameTables [(table, "T")] (sqexpToFormula wher) - (* TODO: refine formula for the vals part, which could take into account the wher part. *) - (* TODO: use pushNegate instead of mapFormulaSigned? *) | Sql.Update (table, vals, wher) => let - val f = sqexpToFormula wher - fun update (positive, a) = - let - fun updateIfNecessary field = - case List.find (fn (f, _) => field = f) vals of - SOME (_, v) => (if positive then Sql.Eq else Sql.Ne, - Sql.Field (table, field), - v) - | NONE => a - in - case a of - (_, Sql.Field (_, field), _) => updateIfNecessary field - | (_, _, Sql.Field (_, field)) => updateIfNecessary field - | _ => a - end + val fWhere = sqexpToFormula wher + val fVals = valsToFormula (table, vals) + (* TODO: don't use field name hack. *) + val markField = + fn Sql.Field (t, v) => Sql.Field (t, v ^ "*") + | e => e + val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) in renameTables [(table, "T")] - (Combo (Dnf, [f, - Combo (Cnf, [valsToFormula (table, vals), - mapFormulaSigned true update f])])) + (Combo (Dnf, [Combo (Cnf, [fVals, mark fWhere]), + Combo (Cnf, [mark fVals, fWhere])])) end val rec tablesQuery = @@ -482,54 +468,62 @@ fun fileMapfold doExp file start = fun fileMap doExp file = #1 (fileMapfold (fn x => fn _ => (doExp x, ())) file ()) +fun factorOutNontrivial text = + let + val loc = ErrorMsg.dummySpan + fun strcat (e1, e2) = (EStrcat (e1, e2), loc) + val chunks = Sql.chunkify text + val (newText, newVariables) = + (* Important that this is foldr (to oppose foldl below). *) + List.foldr + (fn (chunk, (qText, newVars)) => + (* Variable bound to the head of newBs will have the lowest index. *) + case chunk of + Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) + | Sql.Exp e => + let + val n = length newVars + in + (* This is the (n + 1)th new variable, so there are + already n new variables bound, so we increment + indices by n. 
*) + (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) + end + | Sql.String s => (strcat (stringExp s, qText), newVars)) + (stringExp "", []) + chunks + fun wrapLets e' = + (* Important that this is foldl (to oppose foldr above). *) + List.foldl (fn (v, e') => ELet ("sqlArgz", stringTyp, v, (e', loc))) + e' + newVariables + val numArgs = length newVariables + in + (newText, wrapLets, numArgs) + end + fun addChecking file = let - fun doExp (queryInfo as (tableToIndices, indexToQuery)) = + fun doExp (queryInfo as (tableToIndices, indexToQueryNumArgs)) = fn e' as ELet (v, t, - queryExp' as (EQuery {query = origQueryText, - initial, body, state, tables, exps}, queryLoc), + (EQuery {query = origQueryText, + initial, body, state, tables, exps, sqlcacheInfo}, queryLoc), letBody) => let - val loc = ErrorMsg.dummySpan - val chunks = Sql.chunkify origQueryText - fun strcat (e1, e2) = (EStrcat (e1, e2), loc) - val (newQueryText, newVariables) = - (* Important that this is foldr (to oppose foldl below). *) - List.foldr - (fn (chunk, (qText, newVars)) => - (* Variable bound to the head of newBs will have the lowest index. *) - case chunk of - Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) - | Sql.Exp e => - let - val n = length newVars - in - (* This is the (n + 1)th new variable, so - there are already n new variables bound, - so we increment indices by n. *) - (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) - end - | Sql.String s => (strcat (stringExp s, qText), newVars)) - (stringExp "", []) - chunks - fun wrapLets e' = - (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet ("sqlArgz", stringTyp, v, (e', loc))) - e' - newVariables - val numArgs = length newVariables + val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText (* Increment once for each new variable just made. *) - val queryExp = incRels (length newVariables) + val queryExp = incRels numArgs (EQuery {query = newQueryText, initial = initial, body = body, state = state, tables = tables, - exps = exps}, + exps = exps, + sqlcacheInfo = sqlcacheInfo}, queryLoc) val (EQuery {query = queryText, ...}, _) = queryExp - val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)); - val args = List.tabulate (numArgs, fn n => (ERel n, loc)) + val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)) + val args = List.tabulate (numArgs, fn n => (ERel n, ErrorMsg.dummySpan)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE (* DEBUG: set first boolean argument to true to turn on printing. 
*) @@ -542,11 +536,11 @@ fun addChecking file = bind (IM.find (!urlifiedRel0s, index)) (fn urlifiedRel0 => SOME (wrapLets (ELet (v, t, cacheWrap (queryExp, index, urlifiedRel0, args), - incRelsBound 1 (length newVariables) letBody)), + incRelsBound 1 numArgs letBody)), (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) tableToIndices (tablesQuery queryParsed), - IM.insert (indexToQuery, index, (queryParsed, numArgs)))))))) + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)))))))) in case attempt of SOME pair => pair @@ -558,10 +552,12 @@ fun addChecking file = fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty) end +val gunk : (Sql.query * Sql.dml * Mono.exp list list) list ref = ref [] + val gunk' : (((Sql.cmp * Sql.sqexp * Sql.sqexp) formula) * ((Sql.cmp * Sql.sqexp * Sql.sqexp) formula)) list ref = ref [] -fun invalidations (nQueryArgs, query, dml) = +fun invalidations ((query, numArgs), dml) = let val loc = ErrorMsg.dummySpan val optionAtomExpToExp = @@ -578,9 +574,10 @@ fun invalidations (nQueryArgs, query, dml) = let fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) in - inv (nQueryArgs - 1) + inv (numArgs - 1) end - (* *) + (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here + represents unknown, which means a wider invalidation. *) val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = fn ([], []) => true | (NONE :: xs, _ :: ys) => madeRedundantBy (xs, ys) @@ -601,39 +598,67 @@ fun invalidations (nQueryArgs, query, dml) = (map (map optionAtomExpToExp) o removeRedundant o map eqsToInvalidation) eqss end -val gunk : Mono.exp list list list ref = ref [] -fun addFlushing (file, queryInfo as (tableToIndices, indexToQuery)) = +(* gunk := (queryParsed, dmlParsed, invalidations (numArgs, queryParsed, dmlParsed)) :: !gunk); *) + +fun addFlushing (file, (tableToIndices, indexToQueryNumArgs)) = let - val allIndices = SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] tableToIndices - val flushes = map (fn i => ffiAppCache' ("flush", i, [])) + (* TODO: write this. *) + val allInvs = () (* SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] tableToIndices *) + val flushes = List.concat o + map (fn (i, argss) => + map (fn args => + ffiAppCache' ("flush", i, + map (fn arg => (arg, stringTyp)) args)) argss) val doExp = - fn dmlExp as EDml (dmlText, _) => + fn EDml (origDmlText, failureMode) => let - val indices = + val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText + val dmlText = incRels numArgs newDmlText + val dmlExp = EDml (dmlText, failureMode) + val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty dmlText)) + val invs = case Sql.parse Sql.dml dmlText of SOME dmlParsed => - map (fn i => ((case IM.find (indexToQuery, i) of - NONE => () - | SOME (queryParsed, numArgs) => - gunk := invalidations (numArgs, queryParsed, dmlParsed) :: !gunk); - i)) (SIMM.findList (tableToIndices, tableDml dmlParsed)) - | NONE => allIndices + map (fn i => (case IM.find (indexToQueryNumArgs, i) of + SOME queryNumArgs => + (i, invalidations (queryNumArgs, dmlParsed)) + (* TODO: fail more gracefully. *) + | NONE => raise Match)) + (SIMM.findList (tableToIndices, tableDml dmlParsed)) + (* TODO: fail more gracefully. *) + | NONE => raise Match in - sequence (flushes indices @ [dmlExp]) + wrapLets (sequence (flushes invs @ [dmlExp])) end | e' => e' in fileMap doExp file end +val inlineSql = + let + val doExp = + (* TODO: EQuery, too? *) + (* ASK: should this live in [MonoOpt]? 
*) + fn EDml ((ECase (disc, cases, {disc = dTyp, ...}), loc), failureMode) => + let + val newCases = map (fn (p, e) => (p, (EDml (e, failureMode), loc))) cases + in + ECase (disc, newCases, {disc = dTyp, result = (TRecord [], loc)}) + end + | e => e + in + fileMap doExp + end + fun go file = let val () = Sql.sqlcacheMode := true - val file' = addFlushing (addChecking file) + val file' = addFlushing (addChecking (inlineSql file)) val () = Sql.sqlcacheMode := false in - file' + file' end end diff --git a/src/urweb.lex b/src/urweb.lex index 0d316ed2..785f7a81 100644 --- a/src/urweb.lex +++ b/src/urweb.lex @@ -18,7 +18,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -50,7 +50,7 @@ in else (); commentLevel := !commentLevel + 1) - + fun exitComment () = (ignore (commentLevel := !commentLevel - 1); if !commentLevel = 0 then @@ -58,15 +58,15 @@ in else ()) - fun eof () = - let + fun eof () = + let val pos = ErrorMsg.lastLineStart () in if !commentLevel > 0 then ErrorMsg.errorAt' (!commentPos, !commentPos) "Unterminated comment" else (); - Tokens.EOF (pos, pos) + Tokens.EOF (pos, pos) end end @@ -177,7 +177,7 @@ fun unescape loc s = %s COMMENT STRING CHAR XML XMLTAG; id = [a-z_][A-Za-z0-9_']*; -xmlid = [A-Za-z][A-Za-z0-9-_]*; +xmlid = [A-Za-z][A-Za-z0-9_-]*; cid = [A-Z][A-Za-z0-9_]*; ws = [\ \t\012\r]; intconst = [0-9]+; @@ -300,7 +300,7 @@ xint = x[0-9a-fA-F][0-9a-fA-F]; Tokens.XML_END (yypos, yypos + size yytext)) else Tokens.END_TAG (id, yypos, yypos + size yytext) - | _ => + | _ => Tokens.END_TAG (id, yypos, yypos + size yytext) end); -- cgit v1.2.3 From 219524359a25417b9e140130ab77a9a330c41012 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sat, 29 Nov 2014 04:34:41 -0500 Subject: Remove Sqlcache urlification hack. --- src/cjr_print.sml | 19 ++++----- src/monoize.sml | 3 -- src/sqlcache.sml | 113 +++++++++++++++++------------------------------------- 3 files changed, 46 insertions(+), 89 deletions(-) (limited to 'src') diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 81dfefaa..73e0316d 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3394,7 +3394,6 @@ fun p_file env (ds, ps) = newline, (* For sqlcache. *) - (* TODO: also record between Cache.check and Cache.store. *) box (List.map (fn {index, params} => let val i = Int.toString index @@ -3440,14 +3439,16 @@ fun p_file env (ds, ps) = string i, string "(uw_context ctx", string args, - string ") {\n puts(\"SQLCACHE: checked ", - string i, - string ".\");\n if (cacheQuery", + string ") {\n if (cacheQuery", string i, (* ASK: is returning the pointer okay? Should we duplicate? 
*) string " == NULL", string eqs, - string ") {\n puts(\"miss D:\");\n uw_recordingStart(ctx);\n return NULL;\n } else {\n puts(\"hit :D\");\n uw_write(ctx, cacheWrite", + string ") {\n puts(\"SQLCACHE: miss ", + string i, + string ".\");\n uw_recordingStart(ctx);\n return NULL;\n } else {\n puts(\"SQLCACHE: hit ", + string i, + string ".\");\n uw_write(ctx, cacheWrite", string i, string ");\n return cacheQuery", string i, @@ -3473,7 +3474,7 @@ fun p_file env (ds, ps) = newline, string sets, newline, - string "puts(\"SQLCACHE: stored ", + string "puts(\"SQLCACHE: store ", string i, string ".\");\n return uw_unit_v;\n };", newline, @@ -3489,11 +3490,11 @@ fun p_file env (ds, ps) = string i, string ");\n cacheQuery", string i, - string " = NULL;\n puts(\"SQLCACHE: flushed ", + string " = NULL;\n puts(\"SQLCACHE: flush ", string i, - string ".\");}\n else { puts(\"SQLCACHE: keeping ", + string ".\");}\n else { puts(\"SQLCACHE: keep ", string i, - string "\"); } return uw_unit_v;\n };", + string ".\"); } return uw_unit_v;\n };", newline, newline] end) diff --git a/src/monoize.sml b/src/monoize.sml index 5c314c54..fa69b3af 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -1982,9 +1982,6 @@ fun monoExp (env, st, fm) (all as (e, loc)) = initial = (L'.ERel 1, loc), sqlcacheInfo = urlifiedRel0}, loc) - val body = if Settings.getSqlcache () - then Sqlcache.instrumentQuery (body, urlifiedRel0) - else body in ((L'.EAbs ("q", s, (L'.TFun (ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc)), loc), (L'.EAbs ("f", ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc), diff --git a/src/sqlcache.sml b/src/sqlcache.sml index b555ca7a..13a47c9d 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -16,7 +16,7 @@ fun getFfiInfo () = !ffiInfo (* Some FFIs have writing as their only effect, which the caching records. *) val ffiEffectful = - (* TODO: have this less hard-coded. *) + (* ASK: how can this be less hard-coded? *) let val fs = SS.fromList ["htmlifyInt_w", "htmlifyFloat_w", @@ -46,7 +46,7 @@ fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : Mono.e false, then expression is definitely not effectful if effs is fully populated. The intended pattern is to use this a number of times equal to the number of declarations in a file, Bellman-Ford style. *) - (* TODO: make incrementing of bound less janky, probably by using MonoUtil + (* TODO: make incrementing of bound less janky, probably by using [MonoUtil] instead of all this. *) let (* DEBUG: remove printing when done. *) @@ -253,7 +253,9 @@ val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula | ((QueryArg n, DmlRel r), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) | ((Prim p, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) | ((DmlRel r, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) - (* TODO: deal with equalities involving just [DmlRel]s and [Prim]s. *) + (* TODO: deal with equalities involving just [DmlRel]s and [Prim]s. + This would involve guarding the invalidation with a check for the + relevant comparisons. 
*) | (_, eqso) => eqso val eqsOfClass : atomExp list -> atomExp IM.map option = List.foldl accumulateEqs (SOME IM.empty) @@ -295,9 +297,6 @@ val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula fun dnf (fQuery, fDml) = normalize negateCmp Dnf (Combo (Cnf, [markQuery fQuery, markDml fDml])) in - (* val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) - (* * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula *) - (* -> atomExp IM.map list = *) List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf end @@ -402,63 +401,27 @@ fun incRelsBound bound inc = val incRels = incRelsBound 0 -(* Filled in by instrumentQuery during [Monoize], used during [Sqlcache]. *) -val urlifiedRel0s : Mono.exp IM.map ref = ref IM.empty - -(* Used by [Monoize]. *) -val instrumentQuery = +fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = let - val nextQuery = ref 0 - fun iq (query, urlifiedRel0) = - case query of - (EQuery {state = typ, ...}, loc) => - let - val i = !nextQuery before nextQuery := !nextQuery + 1 - in - urlifiedRel0s := IM.insert (!urlifiedRel0s, i, urlifiedRel0); - (ELet (varPrefix ^ Int.toString i, typ, query, - (* Uses a dummy FFI call to keep the urlified expression around, which - in turn keeps the declarations required for urlification safe from - [MonoShake]. The dummy call is removed during [Sqlcache]. *) - (* TODO: thread a [Monoize.Fm.t] through this module. *) - (ESeq ((EFfiApp ("Sqlcache", - "dummy", - [(urlifiedRel0, stringTyp)]), - loc), - (ERel 0, loc)), - loc)), - loc) - end - | _ => raise Match + val () = ffiInfo := {index = i, params = length args} :: !ffiInfo + val loc = ErrorMsg.dummySpan + (* We ensure before this step that all arguments aren't effectful. + by turning them into local variables as needed. *) + val argTyps = map (fn e => (e, stringTyp)) args + val argTypsInc = map (fn (e, typ) => (incRels 1 e, typ)) argTyps + val check = ffiAppCache ("check", i, argTyps) + val store = ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argTypsInc) + val rel0 = (ERel 0, loc) in - iq + ECase (check, + [((PNone stringTyp, loc), + (ELet ("q", resultTyp, query, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar ("hit", stringTyp), loc)), loc), + (* Boolean is false because we're not unurlifying from a cookie. *) + (EUnurlify (rel0, resultTyp, false), loc))], + {disc = stringTyp, result = resultTyp}) end -fun cacheWrap (query, i, urlifiedRel0, args) = - case query of - (EQuery {state = typ, ...}, _) => - let - val () = ffiInfo := {index = i, params = length args} :: !ffiInfo - val loc = ErrorMsg.dummySpan - (* We ensure before this step that all arguments aren't effectful. - by turning them into local variables as needed. *) - val argTyps = map (fn e => (e, stringTyp)) args - val argTypsInc = map (fn (e, typ) => (incRels 1 e, typ)) argTyps - val check = ffiAppCache ("check", i, argTyps) - val store = ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argTypsInc) - val rel0 = (ERel 0, loc) - in - (ECase (check, - [((PNone stringTyp, loc), - (ELet ("q", typ, query, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar ("hit", stringTyp), loc)), loc), - (* Boolean is false because we're not unurlifying from a cookie. 
*) - (EUnurlify (rel0, typ, false), loc))], - {disc = stringTyp, result = typ}), - loc) - end - | _ => raise Match - fun fileMapfold doExp file start = case MonoUtil.File.mapfold {typ = Search.return2, exp = fn x => (fn s => Search.Continue (doExp x s)), @@ -504,23 +467,23 @@ fun factorOutNontrivial text = fun addChecking file = let - fun doExp (queryInfo as (tableToIndices, indexToQueryNumArgs)) = - fn e' as ELet (v, t, - (EQuery {query = origQueryText, - initial, body, state, tables, exps, sqlcacheInfo}, queryLoc), - letBody) => + fun doExp (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = + fn e' as EQuery {query = origQueryText, + sqlcacheInfo = urlifiedRel0, + state = resultTyp, + initial, body, tables, exps} => let val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText (* Increment once for each new variable just made. *) val queryExp = incRels numArgs (EQuery {query = newQueryText, + sqlcacheInfo = urlifiedRel0, + state = resultTyp, initial = initial, body = body, - state = state, tables = tables, - exps = exps, - sqlcacheInfo = sqlcacheInfo}, - queryLoc) + exps = exps}, + ErrorMsg.dummySpan) val (EQuery {query = queryText, ...}, _) = queryExp val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)) val args = List.tabulate (numArgs, fn n => (ERel n, ErrorMsg.dummySpan)) @@ -532,24 +495,20 @@ fun addChecking file = (* Ziv misses Haskell's do notation.... *) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( bind (Sql.parse Sql.query queryText) (fn queryParsed => - bind (indexOfName v) (fn index => - bind (IM.find (!urlifiedRel0s, index)) (fn urlifiedRel0 => - SOME (wrapLets (ELet (v, t, - cacheWrap (queryExp, index, urlifiedRel0, args), - incRelsBound 1 numArgs letBody)), + SOME (wrapLets (cacheWrap (queryExp, index, urlifiedRel0, resultTyp, args)), (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) tableToIndices (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)))))))) + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), + index + 1)))) in case attempt of SOME pair => pair | NONE => (e', queryInfo) end - | ESeq ((EFfiApp ("Sqlcache", "dummy", _), _), (e', _)) => (e', queryInfo) | e' => (e', queryInfo) in - fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty) + fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty, 0) end val gunk : (Sql.query * Sql.dml * Mono.exp list list) list ref = ref [] @@ -601,7 +560,7 @@ fun invalidations ((query, numArgs), dml) = (* gunk := (queryParsed, dmlParsed, invalidations (numArgs, queryParsed, dmlParsed)) :: !gunk); *) -fun addFlushing (file, (tableToIndices, indexToQueryNumArgs)) = +fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = let (* TODO: write this. *) val allInvs = () (* SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] tableToIndices *) -- cgit v1.2.3 From 46f7447e864b89097adedb70827017a5eef234f0 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 31 Mar 2015 04:10:46 -0400 Subject: Fix type in flush FFI call to option string (rather than string). --- src/sqlcache.sml | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 13a47c9d..f60555e8 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -12,6 +12,8 @@ structure SIMM = MultimapFn(structure KeyMap = SM structure ValSet = IS) (* Filled in by [cacheWrap] during [Sqlcache]. 
*) val ffiInfo : {index : int, params : int} list ref = ref [] +fun resetFfiInfo () = ffiInfo := [] + fun getFfiInfo () = !ffiInfo (* Some FFIs have writing as their only effect, which the caching records. *) @@ -376,6 +378,7 @@ val sequence = end | _ => raise Match +(* TODO: factor out. *) fun ffiAppCache' (func, index, args) : Mono.exp' = EFfiApp ("Sqlcache", func ^ Int.toString index, args) @@ -406,7 +409,7 @@ fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = val () = ffiInfo := {index = i, params = length args} :: !ffiInfo val loc = ErrorMsg.dummySpan (* We ensure before this step that all arguments aren't effectful. - by turning them into local variables as needed. *) + by turning them into local variables as needed. *) val argTyps = map (fn e => (e, stringTyp)) args val argTypsInc = map (fn (e, typ) => (incRels 1 e, typ)) argTyps val check = ffiAppCache ("check", i, argTyps) @@ -457,7 +460,7 @@ fun factorOutNontrivial text = chunks fun wrapLets e' = (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet ("sqlArgz", stringTyp, v, (e', loc))) + List.foldl (fn (v, e') => ELet ("sqlArg", stringTyp, v, (e', loc))) e' newVariables val numArgs = length newVariables @@ -511,11 +514,6 @@ fun addChecking file = fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty, 0) end -val gunk : (Sql.query * Sql.dml * Mono.exp list list) list ref = ref [] - -val gunk' : (((Sql.cmp * Sql.sqexp * Sql.sqexp) formula) - * ((Sql.cmp * Sql.sqexp * Sql.sqexp) formula)) list ref = ref [] - fun invalidations ((query, numArgs), dml) = let val loc = ErrorMsg.dummySpan @@ -553,28 +551,28 @@ fun invalidations ((query, numArgs), dml) = fun removeRedundant xss = removeRedundant' (xss, []) val eqss = conflictMaps (queryToFormula query, dmlToFormula dml) in - gunk' := (queryToFormula query, dmlToFormula dml) :: !gunk'; (map (map optionAtomExpToExp) o removeRedundant o map eqsToInvalidation) eqss end - -(* gunk := (queryParsed, dmlParsed, invalidations (numArgs, queryParsed, dmlParsed)) :: !gunk); *) - fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = let - (* TODO: write this. *) - val allInvs = () (* SM.foldr (fn (x, acc) => IS.listItems x @ acc) [] tableToIndices *) + (* ASK: does this type actually matter? It was wrong before, but things + still seemed to work. *) + val optionStringTyp = (TOption stringTyp, ErrorMsg.dummySpan) val flushes = List.concat o map (fn (i, argss) => map (fn args => ffiAppCache' ("flush", i, - map (fn arg => (arg, stringTyp)) args)) argss) + map (fn arg => (arg, optionStringTyp)) + args)) + argss) val doExp = fn EDml (origDmlText, failureMode) => let val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText val dmlText = incRels numArgs newDmlText val dmlExp = EDml (dmlText, failureMode) + (* DEBUG: we can remove the following line at some point. *) val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty dmlText)) val invs = case Sql.parse Sql.dml dmlText of @@ -613,7 +611,8 @@ val inlineSql = fun go file = let - val () = Sql.sqlcacheMode := true + (* TODO: do something nicer than having Sql be in one of two modes. *) + val () = (resetFfiInfo (); Sql.sqlcacheMode := true) val file' = addFlushing (addChecking (inlineSql file)) val () = Sql.sqlcacheMode := false in -- cgit v1.2.3 From f1327b29e1c499845d13e01b4c1635d616713493 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 7 Apr 2015 14:18:53 -0400 Subject: New mouse events oncontextmenu, onmouseenter, and onmouseleave. 
--- lib/js/urweb.js | 12 ++++++++++++ lib/ur/basis.urs | 5 ++++- src/settings.sml | 11 ++++++++++- tests/docevents.ur | 7 ++++--- 4 files changed, 30 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/lib/js/urweb.js b/lib/js/urweb.js index b599393b..335cb525 100644 --- a/lib/js/urweb.js +++ b/lib/js/urweb.js @@ -537,6 +537,10 @@ function uw_onClick(f) { uw_handler("onclick", f); } +function uw_onContextmenu(f) { + uw_handler("oncontextmenu", f); +} + function uw_onDblclick(f) { uw_handler("ondblclick", f); } @@ -545,6 +549,14 @@ function uw_onMousedown(f) { uw_handler("onmousedown", f); } +function uw_onMouseenter(f) { + uw_handler("onmouseenter", f); +} + +function uw_onMouseleave(f) { + uw_handler("onmouseleave", f); +} + function uw_onMousemove(f) { uw_handler("onmousemove", f); } diff --git a/lib/ur/basis.urs b/lib/ur/basis.urs index b8e52582..28384c2c 100644 --- a/lib/ur/basis.urs +++ b/lib/ur/basis.urs @@ -833,7 +833,7 @@ type mouseEvent = { ScreenX : int, ScreenY : int, ClientX : int, ClientY : int, Button : mouseButton } con mouseEvents = map (fn _ :: Unit => mouseEvent -> transaction unit) - [Onclick, Ondblclick, Onmousedown, Onmousemove, Onmouseout, Onmouseover, Onmouseup] + [Onclick, Oncontextmenu, Ondblclick, Onmousedown, Onmouseenter, Onmouseleave, Onmousemove, Onmouseout, Onmouseover, Onmouseup] type keyEvent = { KeyCode : int, CtrlKey : bool, ShiftKey : bool, AltKey : bool, MetaKey : bool } @@ -1120,10 +1120,13 @@ val onServerError : (string -> transaction unit) -> transaction unit (* More standard document-level JavaScript handlers *) val onClick : (mouseEvent -> transaction unit) -> transaction unit val onDblclick : (mouseEvent -> transaction unit) -> transaction unit +val onContextmenu : (mouseEvent -> transaction unit) -> transaction unit val onKeydown : (keyEvent -> transaction unit) -> transaction unit val onKeypress : (keyEvent -> transaction unit) -> transaction unit val onKeyup : (keyEvent -> transaction unit) -> transaction unit val onMousedown : (mouseEvent -> transaction unit) -> transaction unit +val onMouseenter : (mouseEvent -> transaction unit) -> transaction unit +val onMouseleave : (mouseEvent -> transaction unit) -> transaction unit val onMousemove : (mouseEvent -> transaction unit) -> transaction unit val onMouseout : (mouseEvent -> transaction unit) -> transaction unit val onMouseover : (mouseEvent -> transaction unit) -> transaction unit diff --git a/src/settings.sml b/src/settings.sml index bd958e22..e15988cd 100644 --- a/src/settings.sml +++ b/src/settings.sml @@ -176,10 +176,13 @@ val benignBase = basis ["get_cookie", "spawn", "onClick", "onDblclick", + "onContextmenu", "onKeydown", "onKeypress", "onKeyup", "onMousedown", + "onMouseenter", + "onMouseleave", "onMousemove", "onMouseout", "onMouseover", @@ -212,11 +215,14 @@ val clientBase = basis ["get_client_source", "mouseEvent", "keyEvent", "onClick", + "onContextmenu", "onDblclick", "onKeydown", "onKeypress", "onKeyup", "onMousedown", + "onMouseenter", + "onMouseleave", "onMousemove", "onMouseout", "onMouseover", @@ -349,11 +355,14 @@ val jsFuncsBase = basisM [("alert", "alert"), ("onClick", "uw_onClick"), + ("onContextmenu", "uw_onContextmenu"), ("onDblclick", "uw_onDblclick"), ("onKeydown", "uw_onKeydown"), ("onKeypress", "uw_onKeypress"), ("onKeyup", "uw_onKeyup"), ("onMousedown", "uw_onMousedown"), + ("onMouseenter", "uw_onMouseenter"), + ("onMouseleave", "uw_onMouseleave"), ("onMousemove", "uw_onMousemove"), ("onMouseout", "uw_onMouseout"), ("onMouseover", "uw_onMouseover"), @@ 
-764,7 +773,7 @@ fun mangleSqlTable s = fun mangleSql s = if #name (currentDbms ()) = "mysql" then if !mangle then - "uw_" ^ allLower s + "uw_" ^ allLower s else allLower s else diff --git a/tests/docevents.ur b/tests/docevents.ur index eed38868..906afa2b 100644 --- a/tests/docevents.ur +++ b/tests/docevents.ur @@ -1,6 +1,7 @@ fun main () : transaction page = return - alert ("Keypress: " ^ show k))}> + alert "Double click"); + onContextmenu (fn _ => alert "Context menu"); + onKeypress (fn k => alert ("Keypress: " ^ show k.KeyCode))}> Nothing here. - + -- cgit v1.2.3 From f242d9d14317ee01328b8a071502133696f78aa8 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 6 May 2015 14:50:29 -0400 Subject: Factor out cache implementation from Sqlcache. --- src/cjr_print.sml | 106 +----------------------------------------------------- src/sources | 2 ++ src/sqlcache.sml | 52 +++++++++------------------ 3 files changed, 19 insertions(+), 141 deletions(-) (limited to 'src') diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 1b1d656d..12ad309a 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3404,111 +3404,7 @@ fun p_file env (ds, ps) = newline, (* For sqlcache. *) - box (List.map - (fn {index, params} => - let val i = Int.toString index - fun paramRepeat itemi sep = - let - fun f n = - if n < 0 then "" - else if n = 0 then itemi (Int.toString 0) - else f (n-1) ^ sep ^ itemi (Int.toString n) - in - f (params - 1) - end - fun paramRepeatInit itemi sep = - if params = 0 then "" else sep ^ paramRepeat itemi sep - val args = paramRepeatInit (fn p => "uw_Basis_string p" ^ p) ", " - val decls = paramRepeat (fn p => "uw_Basis_string param" ^ i ^ "_" - ^ p ^ " = NULL;") - "\n" - val sets = paramRepeat (fn p => "param" ^ i ^ "_" ^ p - ^ " = strdup(p" ^ p ^ ");") - "\n" - val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") - "\n" - val eqs = paramRepeatInit (fn p => "strcmp(param" ^ i ^ "_" ^ p - ^ ", p" ^ p ^ ")") - " || " - (* Using [!=] instead of [==] to mimic [strcmp]. *) - val eqsNull = paramRepeatInit (fn p => "(p" ^ p ^ " == NULL || " - ^ "!strcmp(param" ^ i ^ "_" - ^ p ^ ", p" ^ p ^ "))") - " && " - in box [string "static char *cacheQuery", - string i, - string " = NULL;", - newline, - string "static char *cacheWrite", - string i, - string " = NULL;", - newline, - string decls, - newline, - string "static uw_Basis_string uw_Sqlcache_check", - string i, - string "(uw_context ctx", - string args, - string ") {\n if (cacheQuery", - string i, - (* ASK: is returning the pointer okay? Should we duplicate? 
*) - string " == NULL", - string eqs, - string ") {\n puts(\"SQLCACHE: miss ", - string i, - string ".\");\n uw_recordingStart(ctx);\n return NULL;\n } else {\n puts(\"SQLCACHE: hit ", - string i, - string ".\");\n uw_write(ctx, cacheWrite", - string i, - string ");\n return cacheQuery", - string i, - string ";\n } };", - newline, - string "static uw_unit uw_Sqlcache_store", - string i, - string "(uw_context ctx, uw_Basis_string s", - string args, - string ") {\n free(cacheQuery", - string i, - string "); free(cacheWrite", - string i, - string ");", - newline, - string frees, - newline, - string "cacheQuery", - string i, - string " = strdup(s); cacheWrite", - string i, - string " = uw_recordingRead(ctx);", - newline, - string sets, - newline, - string "puts(\"SQLCACHE: store ", - string i, - string ".\");\n return uw_unit_v;\n };", - newline, - string "static uw_unit uw_Sqlcache_flush", - string i, - string "(uw_context ctx", - string args, - string ") {\n if (cacheQuery", - string i, - string " != NULL", - string eqsNull, - string ") {\n free(cacheQuery", - string i, - string ");\n cacheQuery", - string i, - string " = NULL;\n puts(\"SQLCACHE: flush ", - string i, - string ".\");}\n else { puts(\"SQLCACHE: keep ", - string i, - string ".\"); } return uw_unit_v;\n };", - newline, - newline] - end) - (Sqlcache.getFfiInfo ())), + box (List.map ToyCache.setupQuery (Sqlcache.getFfiInfo ())), newline, p_list_sep newline (fn x => x) pds, diff --git a/src/sources b/src/sources index 33c01f94..05897cd4 100644 --- a/src/sources +++ b/src/sources @@ -175,6 +175,8 @@ $(SRC)/union_find_fn.sml $(SRC)/multimap_fn.sml +$(SRC)/toy_cache.sml + $(SRC)/sqlcache.sig $(SRC)/sqlcache.sml diff --git a/src/sqlcache.sml b/src/sqlcache.sml index f60555e8..931c6737 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -43,7 +43,7 @@ val ffiEffectful = (* Effect analysis. *) (* Makes an exception for [EWrite] (which is recorded when caching). *) -fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : Mono.exp -> bool = +fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : exp -> bool = (* If result is true, expression is definitely effectful. If result is false, then expression is definitely not effectful if effs is fully populated. The intended pattern is to use this a number of times equal @@ -183,6 +183,7 @@ fun mapFormula mf = | Negate f => Negate (mapFormula mf f) | Combo (n, fs) => Combo (n, map (mapFormula mf) fs) + (* SQL analysis. *) val rec chooseTwos : 'a list -> ('a * 'a) list = @@ -365,33 +366,21 @@ val tableDml = (* Program instrumentation. *) -fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), ErrorMsg.dummySpan) +val dummyLoc = ErrorMsg.dummySpan + +fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), dummyLoc) -val stringTyp = (TFfi ("Basis", "string"), ErrorMsg.dummySpan) +val stringTyp = (TFfi ("Basis", "string"), dummyLoc) val sequence = fn (exp :: exps) => let - val loc = ErrorMsg.dummySpan + val loc = dummyLoc in List.foldl (fn (e', seq) => ESeq ((seq, loc), (e', loc))) exp exps end | _ => raise Match -(* TODO: factor out. 
*) -fun ffiAppCache' (func, index, args) : Mono.exp' = - EFfiApp ("Sqlcache", func ^ Int.toString index, args) - -fun ffiAppCache (func, index, args) : Mono.exp = - (ffiAppCache' (func, index, args), ErrorMsg.dummySpan) - -val varPrefix = "queryResult" - -fun indexOfName varName = - if String.isPrefix varPrefix varName - then Int.fromString (String.extract (varName, String.size varPrefix, NONE)) - else NONE - (* Always increments negative indices because that's what we need later. *) fun incRelsBound bound inc = MonoUtil.Exp.mapB @@ -407,13 +396,12 @@ val incRels = incRelsBound 0 fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = let val () = ffiInfo := {index = i, params = length args} :: !ffiInfo - val loc = ErrorMsg.dummySpan + val loc = dummyLoc (* We ensure before this step that all arguments aren't effectful. by turning them into local variables as needed. *) - val argTyps = map (fn e => (e, stringTyp)) args - val argTypsInc = map (fn (e, typ) => (incRels 1 e, typ)) argTyps - val check = ffiAppCache ("check", i, argTyps) - val store = ffiAppCache ("store", i, (urlifiedRel0, stringTyp) :: argTypsInc) + val argsInc = map (incRels 1) args + val check = (ToyCache.check (i, args), dummyLoc) + val store = (ToyCache.store (i, argsInc, urlifiedRel0), dummyLoc) val rel0 = (ERel 0, loc) in ECase (check, @@ -436,7 +424,7 @@ fun fileMap doExp file = #1 (fileMapfold (fn x => fn _ => (doExp x, ())) file () fun factorOutNontrivial text = let - val loc = ErrorMsg.dummySpan + val loc = dummyLoc fun strcat (e1, e2) = (EStrcat (e1, e2), loc) val chunks = Sql.chunkify text val (newText, newVariables) = @@ -486,10 +474,10 @@ fun addChecking file = body = body, tables = tables, exps = exps}, - ErrorMsg.dummySpan) + dummyLoc) val (EQuery {query = queryText, ...}, _) = queryExp val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)) - val args = List.tabulate (numArgs, fn n => (ERel n, ErrorMsg.dummySpan)) + val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE (* DEBUG: set first boolean argument to true to turn on printing. *) @@ -516,7 +504,7 @@ fun addChecking file = fun invalidations ((query, numArgs), dml) = let - val loc = ErrorMsg.dummySpan + val loc = dummyLoc val optionAtomExpToExp = fn NONE => (ENone stringTyp, loc) | SOME e => (ESome (stringTyp, @@ -556,16 +544,8 @@ fun invalidations ((query, numArgs), dml) = fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = let - (* ASK: does this type actually matter? It was wrong before, but things - still seemed to work. *) - val optionStringTyp = (TOption stringTyp, ErrorMsg.dummySpan) val flushes = List.concat o - map (fn (i, argss) => - map (fn args => - ffiAppCache' ("flush", i, - map (fn arg => (arg, optionStringTyp)) - args)) - argss) + map (fn (i, argss) => map (fn args => ToyCache.flush (i, args)) argss) val doExp = fn EDml (origDmlText, failureMode) => let -- cgit v1.2.3 From d3f147f2de07ca854f3eb8679e05460ea0c2c841 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 6 May 2015 14:50:55 -0400 Subject: Add missing file. 
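src/toy_cache.sml is the module that the previous commit ("Factor out
cache implementation from Sqlcache.") already wired into src/sources,
Cjr_print, and Sqlcache; it bundles the check/store/flush expression
builders and the C generators into a Cache.cache record.

For orientation, the C that setupQuery emits for cache index 0 with one
string parameter comes out roughly as below. This is a hand-rendered
sketch assembled from the string fragments in the file, so spacing and
minor details are approximate rather than authoritative:

    static char *cacheQuery0 = NULL;
    static char *cacheWrite0 = NULL;
    uw_Basis_string param0_0 = NULL;

    static uw_Basis_string uw_Sqlcache_check0(uw_context ctx, uw_Basis_string p0) {
      if (cacheQuery0 == NULL || strcmp(param0_0, p0)) {
        /* Miss: start recording page output so store0 can capture it. */
        puts("SQLCACHE: miss 0.");
        uw_recordingStart(ctx);
        return NULL;
      } else {
        /* Hit: replay the recorded writes and return the cached result. */
        puts("SQLCACHE: hit 0.");
        uw_write(ctx, cacheWrite0);
        return cacheQuery0;
      }
    }

    static uw_unit uw_Sqlcache_store0(uw_context ctx, uw_Basis_string s, uw_Basis_string p0) {
      free(cacheQuery0); free(cacheWrite0);
      free(param0_0);
      cacheQuery0 = strdup(s);
      cacheWrite0 = uw_recordingRead(ctx);
      param0_0 = strdup(p0);
      puts("SQLCACHE: store 0.");
      return uw_unit_v;
    }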
--- src/toy_cache.sml | 185 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 src/toy_cache.sml (limited to 'src') diff --git a/src/toy_cache.sml b/src/toy_cache.sml new file mode 100644 index 00000000..23dfe4fe --- /dev/null +++ b/src/toy_cache.sml @@ -0,0 +1,185 @@ +structure ToyCache = struct + +(* Mono *) + +open Mono + +val dummyLoc = ErrorMsg.dummySpan +val stringTyp = (TFfi ("Basis", "string"), dummyLoc) +val optionStringTyp = (TOption stringTyp, dummyLoc) +fun withTyp typ = map (fn exp => (exp, typ)) + +fun ffiAppCache' (func, index, argTyps) = + EFfiApp ("Sqlcache", func ^ Int.toString index, argTyps) + +fun check (index, keys) = + ffiAppCache' ("check", index, withTyp stringTyp keys) + +fun store (index, keys, value) = + ffiAppCache' ("store", index, (value, stringTyp) :: withTyp stringTyp keys) + +fun flush (index, keys) = + ffiAppCache' ("flush", index, withTyp optionStringTyp keys) + + +(* Cjr *) + +open Print +open Print.PD + +fun setupQuery {index, params} = + let + + val i = Int.toString index + + fun paramRepeat itemi sep = + let + fun f n = + if n < 0 then "" + else if n = 0 then itemi (Int.toString 0) + else f (n-1) ^ sep ^ itemi (Int.toString n) + in + f (params - 1) + end + + fun paramRepeatInit itemi sep = + if params = 0 then "" else sep ^ paramRepeat itemi sep + + val args = paramRepeatInit (fn p => "uw_Basis_string p" ^ p) ", " + + val decls = paramRepeat (fn p => "uw_Basis_string param" ^ i ^ "_" + ^ p ^ " = NULL;") + "\n" + + val sets = paramRepeat (fn p => "param" ^ i ^ "_" ^ p + ^ " = strdup(p" ^ p ^ ");") + "\n" + + val frees = paramRepeat (fn p => "free(param" ^ i ^ "_" ^ p ^ ");") + "\n" + + val eqs = paramRepeatInit (fn p => "strcmp(param" ^ i ^ "_" ^ p + ^ ", p" ^ p ^ ")") + " || " + + (* Using [!=] instead of [==] to mimic [strcmp]. *) + val eqsNull = paramRepeatInit (fn p => "(p" ^ p ^ " == NULL || " + ^ "!strcmp(param" ^ i ^ "_" + ^ p ^ ", p" ^ p ^ "))") + " && " + + in + Print.box + [string "static char *cacheQuery", + string i, + string " = NULL;", + newline, + string "static char *cacheWrite", + string i, + string " = NULL;", + newline, + string decls, + newline, + string "static uw_Basis_string uw_Sqlcache_check", + string i, + string "(uw_context ctx", + string args, + string ") {", + newline, + string "if (cacheQuery", + string i, + (* ASK: is returning the pointer okay? Should we duplicate? 
*) + string " == NULL", + string eqs, + string ") {", + newline, + string "puts(\"SQLCACHE: miss ", + string i, + string ".\");", + newline, + string "uw_recordingStart(ctx);", + newline, + string "return NULL;", + newline, + string "} else {", + newline, + string "puts(\"SQLCACHE: hit ", + string i, + string ".\");", + newline, + string "uw_write(ctx, cacheWrite", + string i, + string ");", + newline, + string "return cacheQuery", + string i, + string ";", + newline, + string "} };", + newline, + string "static uw_unit uw_Sqlcache_store", + string i, + string "(uw_context ctx, uw_Basis_string s", + string args, + string ") {", + newline, + string "free(cacheQuery", + string i, + string "); free(cacheWrite", + string i, + string ");", + newline, + string frees, + newline, + string "cacheQuery", + string i, + string " = strdup(s); cacheWrite", + string i, + string " = uw_recordingRead(ctx);", + newline, + string sets, + newline, + string "puts(\"SQLCACHE: store ", + string i, + string ".\");", + newline, + string "return uw_unit_v;", + newline, + string "};", + newline, + string "static uw_unit uw_Sqlcache_flush", + string i, + string "(uw_context ctx", + string args, + string ") {", + newline, + string "if (cacheQuery", + string i, + string " != NULL", + string eqsNull, + string ") {", + newline, + string "free(cacheQuery", + string i, + string ");", + newline, + string "cacheQuery", + string i, + string " = NULL;", + newline, + string "puts(\"SQLCACHE: flush ", + string i, + string ".\");}", + newline, + string "else { puts(\"SQLCACHE: keep ", + string i, + string ".\"); } return uw_unit_v;", + newline, + string "};", + newline, + newline] + end + +val setupGlobal = string "/* No global setup for toy cache. */" + +end -- cgit v1.2.3 From ca3efa1458583772a9826198ed4b99eec381f2de Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 6 May 2015 23:11:30 -0400 Subject: More work factoring out Sqlcache back end. --- src/cache.sml | 16 ++++++++++++++++ src/cjr_print.sml | 6 +++++- src/sources | 1 + src/sqlcache.sig | 5 ++++- src/sqlcache.sml | 14 ++++++++++---- src/toy_cache.sml | 11 ++++++++++- 6 files changed, 46 insertions(+), 7 deletions(-) create mode 100644 src/cache.sml (limited to 'src') diff --git a/src/cache.sml b/src/cache.sml new file mode 100644 index 00000000..8de22e0d --- /dev/null +++ b/src/cache.sml @@ -0,0 +1,16 @@ +structure Cache = struct + +type cache = + {(* Takes a query ID and parameters (and, for store, the value to + store) and gives an FFI call that checks, stores, or flushes the + relevant entry. The parameters are strings for check and store and + optional strings for flush because some parameters might not be + fixed. *) + check : int * Mono.exp list -> Mono.exp', + store : int * Mono.exp list * Mono.exp -> Mono.exp', + flush : int * Mono.exp list -> Mono.exp', + (* Generates C needed for FFI calls in check, store, and flush. *) + setupGlobal : Print.PD.pp_desc, + setupQuery : {index : int, params : int} -> Print.PD.pp_desc} + +end diff --git a/src/cjr_print.sml b/src/cjr_print.sml index 12ad309a..e6ecedde 100644 --- a/src/cjr_print.sml +++ b/src/cjr_print.sml @@ -3404,7 +3404,11 @@ fun p_file env (ds, ps) = newline, (* For sqlcache. 
*) - box (List.map ToyCache.setupQuery (Sqlcache.getFfiInfo ())), + let + val {setupGlobal, setupQuery, ...} = Sqlcache.getCache () + in + box (setupGlobal :: newline :: List.map setupQuery (Sqlcache.getFfiInfo ())) + end, newline, p_list_sep newline (fn x => x) pds, diff --git a/src/sources b/src/sources index 05897cd4..aaf640ca 100644 --- a/src/sources +++ b/src/sources @@ -175,6 +175,7 @@ $(SRC)/union_find_fn.sml $(SRC)/multimap_fn.sml +$(SRC)/cache.sml $(SRC)/toy_cache.sml $(SRC)/sqlcache.sig diff --git a/src/sqlcache.sig b/src/sqlcache.sig index ccc1741a..fabc9ebf 100644 --- a/src/sqlcache.sig +++ b/src/sqlcache.sig @@ -1,6 +1,9 @@ signature SQLCACHE = sig -val ffiIndices : int list ref +val setCache : Cache.cache -> unit +val getCache : unit -> Cache.cache + +val getFfiInfo : unit -> {index : int, params : int} list val go : Mono.file -> Mono.file end diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 931c6737..3082904c 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache (* :> SQLCACHE *) = struct +structure Sqlcache :> SQLCACHE = struct open Mono @@ -39,6 +39,10 @@ val ffiEffectful = andalso not (m = "Basis" andalso SS.member (fs, f)) end +val cache = ref ToyCache.cache +fun setCache c = cache := c +fun getCache () = !cache + (* Effect analysis. *) @@ -366,6 +370,8 @@ val tableDml = (* Program instrumentation. *) +val {check, store, flush, ...} = getCache () + val dummyLoc = ErrorMsg.dummySpan fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), dummyLoc) @@ -400,8 +406,8 @@ fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = (* We ensure before this step that all arguments aren't effectful. by turning them into local variables as needed. *) val argsInc = map (incRels 1) args - val check = (ToyCache.check (i, args), dummyLoc) - val store = (ToyCache.store (i, argsInc, urlifiedRel0), dummyLoc) + val check = (check (i, args), dummyLoc) + val store = (store (i, argsInc, urlifiedRel0), dummyLoc) val rel0 = (ERel 0, loc) in ECase (check, @@ -545,7 +551,7 @@ fun invalidations ((query, numArgs), dml) = fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = let val flushes = List.concat o - map (fn (i, argss) => map (fn args => ToyCache.flush (i, args)) argss) + map (fn (i, argss) => map (fn args => flush (i, args)) argss) val doExp = fn EDml (origDmlText, failureMode) => let diff --git a/src/toy_cache.sml b/src/toy_cache.sml index 23dfe4fe..126768b6 100644 --- a/src/toy_cache.sml +++ b/src/toy_cache.sml @@ -1,4 +1,7 @@ -structure ToyCache = struct +structure ToyCache : sig + val cache : Cache.cache +end = struct + (* Mono *) @@ -182,4 +185,10 @@ fun setupQuery {index, params} = val setupGlobal = string "/* No global setup for toy cache. */" + +(* Bundled up. *) + +val cache = {check = check, store = store, flush = flush, + setupQuery = setupQuery, setupGlobal = setupGlobal} + end -- cgit v1.2.3 From 24edb607ef64db1ab12b3d5b9ccd3848c50780d1 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 28 Jun 2015 12:46:51 -0700 Subject: Progress on LRU cache but still more known bugs to fix. 
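The pieces so far: a vendored uthash.h supplies the hash table,
include/urweb/types_cpp.h gains the CacheValue, CacheEntry, CacheList,
and Cache structs, include/urweb/urweb_cpp.h declares check/store/flush
over them, the C implementation goes into src/c/urweb.c, and
src/lru_cache.sml is an LRU counterpart to toy_cache.sml on the compiler
side. The timeValid/timeInvalid fields suggest that flushes invalidate
entries lazily by timestamp rather than by eager deletion.

Recency is tracked with the doubly-linked CacheList. Below is a minimal
sketch of that bookkeeping, written against the structs added in this
patch and assuming the most recently used entry sits at the list's last
pointer; the function name is invented for illustration, and the real
code in src/c/urweb.c (not reproduced in this excerpt) also has to
maintain lru->size and the timestamps:

    /* Move an entry to the most-recently-used end of the list; also
       works for a freshly allocated entry whose prev/next are NULL. */
    static void lruBump(CacheList *lru, CacheEntry *entry) {
      if (lru->last == entry) return;  /* already the newest entry */
      /* Unlink the entry from wherever it currently sits (no-op if new). */
      if (entry->prev) entry->prev->next = entry->next;
      if (entry->next) entry->next->prev = entry->prev;
      if (lru->first == entry) lru->first = entry->next;
      /* Relink it at the most-recently-used end. */
      entry->prev = lru->last;
      entry->next = NULL;
      if (lru->last) lru->last->next = entry;
      lru->last = entry;
      if (!lru->first) lru->first = entry;
    }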
--- caching-tests/test.ur | 17 +- include/urweb/types_cpp.h | 31 ++ include/urweb/urweb_cpp.h | 8 + include/urweb/uthash.h | 963 ++++++++++++++++++++++++++++++++++++++++++++++ src/c/urweb.c | 147 +++++++ src/lru_cache.sml | 171 ++++++++ src/sources | 1 + src/sqlcache.sml | 115 +++--- 8 files changed, 1397 insertions(+), 56 deletions(-) create mode 100644 include/urweb/uthash.h create mode 100644 src/lru_cache.sml (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 8035e336..842fd77d 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -16,13 +16,24 @@ fun flush id = FROM tab WHERE tab.Id = {[id]}); (case res of - None => dml (INSERT INTO tab (Id, Val) - VALUES ({[id]}, 0)) + None => return () (* dml (INSERT INTO tab (Id, Val) *) + (* VALUES ({[id]}, 0)) *) | Some row => dml (UPDATE tab SET Val = {[row.Tab.Val + 1]} - WHERE Id = {[id]})); + WHERE Id = {[id + 1]} OR Id = {[id]} (* OR Id = {[id - 1]} *))); return {case res of None => Initialized {[id]}! | Some row => Incremented {[id]}!} + +(* task periodic 5 = *) +(* fn () => *) +(* t <- now; *) +(* let *) +(* val n = toSeconds t % 2 *) +(* in *) +(* dml (UPDATE tab *) +(* SET Val = 9001 *) +(* WHERE Id = {[n]} OR Id = {[n+1]}) *) +(* end *) diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 0c431ff8..2f154e1f 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -119,4 +119,35 @@ typedef struct { char *start, *front, *back; } uw_buffer; +// Caching + +#include "uthash.h" + +typedef struct CacheValue { + char *result; + char *output; +} CacheValue; + +typedef struct CacheEntry { + char *key; + void *value; + time_t timeValid; + struct CacheEntry *prev; + struct CacheEntry *next; + UT_hash_handle hh; +} CacheEntry; + +typedef struct CacheList { + CacheEntry *first; + CacheEntry *last; + int size; +} CacheList; + +typedef struct Cache { + CacheEntry *table; + time_t timeInvalid; + CacheList *lru; + int height; +} Cache; + #endif diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index a9d42554..3ae5b69e 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -402,4 +402,12 @@ void uw_set_remoteSock(struct uw_context *, int sock); void uw_Basis_writec(struct uw_context *, char); +// Sqlcache. + +#include "uthash.h" + +CacheValue *check(Cache *, char **); +CacheValue *store(Cache *, char **, CacheValue *); +CacheValue *flush(Cache *, char **); + #endif diff --git a/include/urweb/uthash.h b/include/urweb/uthash.h new file mode 100644 index 00000000..367d295a --- /dev/null +++ b/include/urweb/uthash.h @@ -0,0 +1,963 @@ +/* +Copyright (c) 2003-2014, Troy D. Hanson http://troydhanson.github.com/uthash/ +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef UTHASH_H +#define UTHASH_H + +#include /* memcmp,strlen */ +#include /* ptrdiff_t */ +#include /* exit() */ + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ source) this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. */ +#if defined(_MSC_VER) /* MS compiler */ +#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ +#define DECLTYPE(x) (decltype(x)) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#define DECLTYPE(x) +#endif +#elif defined(__BORLANDC__) || defined(__LCC__) || defined(__WATCOMC__) +#define NO_DECLTYPE +#define DECLTYPE(x) +#else /* GNU, Sun and other compilers */ +#define DECLTYPE(x) (__typeof(x)) +#endif + +#ifdef NO_DECLTYPE +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + char **_da_dst = (char**)(&(dst)); \ + *_da_dst = (char*)(src); \ +} while(0) +#else +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + (dst) = DECLTYPE(dst)(src); \ +} while(0) +#endif + +/* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */ +#if defined (_WIN32) +#if defined(_MSC_VER) && _MSC_VER >= 1600 +#include +#elif defined(__WATCOMC__) +#include +#else +typedef unsigned int uint32_t; +typedef unsigned char uint8_t; +#endif +#else +#include +#endif + +#define UTHASH_VERSION 1.9.9 + +#ifndef uthash_fatal +#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */ +#endif +#ifndef uthash_malloc +#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ +#endif +#ifndef uthash_free +#define uthash_free(ptr,sz) free(ptr) /* free fcn */ +#endif + +#ifndef uthash_noexpand_fyi +#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ +#endif +#ifndef uthash_expand_fyi +#define uthash_expand_fyi(tbl) /* can be defined to log expands */ +#endif + +/* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ +#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ + +/* calculate the element whose hash handle address is hhe */ +#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) + +#define HASH_FIND(hh,head,keyptr,keylen,out) \ +do { \ + out=NULL; \ + if (head != NULL) { \ + unsigned _hf_bkt,_hf_hashv; \ + HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \ + if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv) != 0) { \ + HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \ + keyptr,keylen,out); \ + } \ + } \ +} while (0) + +#ifdef HASH_BLOOM +#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) +#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 
1UL : 0UL) +#define HASH_BLOOM_MAKE(tbl) \ +do { \ + (tbl)->bloom_nbits = HASH_BLOOM; \ + (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ + if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \ + memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \ + (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ +} while (0) + +#define HASH_BLOOM_FREE(tbl) \ +do { \ + uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ +} while (0) + +#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) +#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) + +#define HASH_BLOOM_ADD(tbl,hashv) \ + HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U))) + +#define HASH_BLOOM_TEST(tbl,hashv) \ + HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U))) + +#else +#define HASH_BLOOM_MAKE(tbl) +#define HASH_BLOOM_FREE(tbl) +#define HASH_BLOOM_ADD(tbl,hashv) +#define HASH_BLOOM_TEST(tbl,hashv) (1) +#define HASH_BLOOM_BYTELEN 0U +#endif + +#define HASH_MAKE_TABLE(hh,head) \ +do { \ + (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \ + sizeof(UT_hash_table)); \ + if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \ + memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \ + (head)->hh.tbl->tail = &((head)->hh); \ + (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ + (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ + (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ + (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \ + memset((head)->hh.tbl->buckets, 0, \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_MAKE((head)->hh.tbl); \ + (head)->hh.tbl->signature = HASH_SIGNATURE; \ +} while(0) + +#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ + HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add) + +#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ +do { \ + replaced=NULL; \ + HASH_FIND(hh,head,&((add)->fieldname),keylen_in,replaced); \ + if (replaced!=NULL) { \ + HASH_DELETE(hh,head,replaced); \ + } \ + HASH_ADD(hh,head,fieldname,keylen_in,add); \ +} while(0) + +#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ +do { \ + unsigned _ha_bkt; \ + (add)->hh.next = NULL; \ + (add)->hh.key = (char*)(keyptr); \ + (add)->hh.keylen = (unsigned)(keylen_in); \ + if (!(head)) { \ + head = (add); \ + (head)->hh.prev = NULL; \ + HASH_MAKE_TABLE(hh,head); \ + } else { \ + (head)->hh.tbl->tail->next = (add); \ + (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ + (head)->hh.tbl->tail = &((add)->hh); \ + } \ + (head)->hh.tbl->num_items++; \ + (add)->hh.tbl = (head)->hh.tbl; \ + HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \ + (add)->hh.hashv, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \ + HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \ + HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \ + HASH_FSCK(hh,head); \ +} while(0) + +#define HASH_TO_BKT( hashv, num_bkts, bkt ) \ +do { \ + bkt = ((hashv) & ((num_bkts) - 1U)); \ +} while(0) + +/* delete "delptr" from the hash table. + * "the usual" patch-up process for the app-order doubly-linked-list. + * The use of _hd_hh_del below deserves special explanation. 
+ * These used to be expressed using (delptr) but that led to a bug + * if someone used the same symbol for the head and deletee, like + * HASH_DELETE(hh,users,users); + * We want that to work, but by changing the head (users) below + * we were forfeiting our ability to further refer to the deletee (users) + * in the patch-up process. Solution: use scratch space to + * copy the deletee pointer, then the latter references are via that + * scratch pointer rather than through the repointed (users) symbol. + */ +#define HASH_DELETE(hh,head,delptr) \ +do { \ + struct UT_hash_handle *_hd_hh_del; \ + if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \ + uthash_free((head)->hh.tbl->buckets, \ + (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + head = NULL; \ + } else { \ + unsigned _hd_bkt; \ + _hd_hh_del = &((delptr)->hh); \ + if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \ + (head)->hh.tbl->tail = \ + (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ + (head)->hh.tbl->hho); \ + } \ + if ((delptr)->hh.prev != NULL) { \ + ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ + (head)->hh.tbl->hho))->next = (delptr)->hh.next; \ + } else { \ + DECLTYPE_ASSIGN(head,(delptr)->hh.next); \ + } \ + if (_hd_hh_del->next != NULL) { \ + ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \ + (head)->hh.tbl->hho))->prev = \ + _hd_hh_del->prev; \ + } \ + HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ + HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ + (head)->hh.tbl->num_items--; \ + } \ + HASH_FSCK(hh,head); \ +} while (0) + + +/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ +#define HASH_FIND_STR(head,findstr,out) \ + HASH_FIND(hh,head,findstr,(unsigned)strlen(findstr),out) +#define HASH_ADD_STR(head,strfield,add) \ + HASH_ADD(hh,head,strfield[0],(unsigned int)strlen(add->strfield),add) +#define HASH_REPLACE_STR(head,strfield,add,replaced) \ + HASH_REPLACE(hh,head,strfield[0],(unsigned)strlen(add->strfield),add,replaced) +#define HASH_FIND_INT(head,findint,out) \ + HASH_FIND(hh,head,findint,sizeof(int),out) +#define HASH_ADD_INT(head,intfield,add) \ + HASH_ADD(hh,head,intfield,sizeof(int),add) +#define HASH_REPLACE_INT(head,intfield,add,replaced) \ + HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) +#define HASH_FIND_PTR(head,findptr,out) \ + HASH_FIND(hh,head,findptr,sizeof(void *),out) +#define HASH_ADD_PTR(head,ptrfield,add) \ + HASH_ADD(hh,head,ptrfield,sizeof(void *),add) +#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ + HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) +#define HASH_DEL(head,delptr) \ + HASH_DELETE(hh,head,delptr) + +/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. + * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. + */ +#ifdef HASH_DEBUG +#define HASH_OOPS(...) 
do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0) +#define HASH_FSCK(hh,head) \ +do { \ + struct UT_hash_handle *_thh; \ + if (head) { \ + unsigned _bkt_i; \ + unsigned _count; \ + char *_prev; \ + _count = 0; \ + for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \ + unsigned _bkt_count = 0; \ + _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ + _prev = NULL; \ + while (_thh) { \ + if (_prev != (char*)(_thh->hh_prev)) { \ + HASH_OOPS("invalid hh_prev %p, actual %p\n", \ + _thh->hh_prev, _prev ); \ + } \ + _bkt_count++; \ + _prev = (char*)(_thh); \ + _thh = _thh->hh_next; \ + } \ + _count += _bkt_count; \ + if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ + HASH_OOPS("invalid bucket count %u, actual %u\n", \ + (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ + } \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("invalid hh item count %u, actual %u\n", \ + (head)->hh.tbl->num_items, _count ); \ + } \ + /* traverse hh in app order; check next/prev integrity, count */ \ + _count = 0; \ + _prev = NULL; \ + _thh = &(head)->hh; \ + while (_thh) { \ + _count++; \ + if (_prev !=(char*)(_thh->prev)) { \ + HASH_OOPS("invalid prev %p, actual %p\n", \ + _thh->prev, _prev ); \ + } \ + _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ + _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \ + (head)->hh.tbl->hho) : NULL ); \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("invalid app item count %u, actual %u\n", \ + (head)->hh.tbl->num_items, _count ); \ + } \ + } \ +} while (0) +#else +#define HASH_FSCK(hh,head) +#endif + +/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to + * the descriptor to which this macro is defined for tuning the hash function. + * The app can #include to get the prototype for write(2). */ +#ifdef HASH_EMIT_KEYS +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ +do { \ + unsigned _klen = fieldlen; \ + write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ + write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ +} while (0) +#else +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) +#endif + +/* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */ +#ifdef HASH_FUNCTION +#define HASH_FCN HASH_FUNCTION +#else +#define HASH_FCN HASH_JEN +#endif + +/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. 
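As a quick orientation to the macros above (HASH_FIND, HASH_ADD_KEYPTR, HASH_DELETE), here is a minimal usage sketch; it is not part of the patch, and the Item struct and key strings are invented for illustration. CacheEntry in types_cpp.h hooks into these tables the same way, through its UT_hash_handle hh member.

#include <stdlib.h>
#include <string.h>
#include "uthash.h"

typedef struct Item {
  char *key;            /* key is stored by pointer, as CacheEntry does */
  int data;
  UT_hash_handle hh;    /* makes the struct hashable; the convenience macros assume the name hh */
} Item;

static Item *items = NULL;  /* an empty hash is just a NULL head pointer */

static void demo(void) {
  Item *it = malloc(sizeof(Item));
  it->key = strdup("alpha");
  it->data = 1;
  HASH_ADD_KEYPTR(hh, items, it->key, strlen(it->key), it);

  Item *found = NULL;
  HASH_FIND(hh, items, "alpha", strlen("alpha"), found);
  if (found != NULL) {
    HASH_DELETE(hh, items, found);  /* unlinks only; freeing key and struct is the caller's job */
    free(found->key);
    free(found);
  }
}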
*/ +#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _hb_keylen=(unsigned)keylen; \ + const unsigned char *_hb_key=(const unsigned char*)(key); \ + (hashv) = 0; \ + while (_hb_keylen-- != 0U) { \ + (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ + } \ + bkt = (hashv) & (num_bkts-1U); \ +} while (0) + + +/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at + * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ +#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _sx_i; \ + const unsigned char *_hs_key=(const unsigned char*)(key); \ + hashv = 0; \ + for(_sx_i=0; _sx_i < keylen; _sx_i++) { \ + hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ + } \ + bkt = hashv & (num_bkts-1U); \ +} while (0) +/* FNV-1a variation */ +#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _fn_i; \ + const unsigned char *_hf_key=(const unsigned char*)(key); \ + hashv = 2166136261U; \ + for(_fn_i=0; _fn_i < keylen; _fn_i++) { \ + hashv = hashv ^ _hf_key[_fn_i]; \ + hashv = hashv * 16777619U; \ + } \ + bkt = hashv & (num_bkts-1U); \ +} while(0) + +#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _ho_i; \ + const unsigned char *_ho_key=(const unsigned char*)(key); \ + hashv = 0; \ + for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ + hashv += _ho_key[_ho_i]; \ + hashv += (hashv << 10); \ + hashv ^= (hashv >> 6); \ + } \ + hashv += (hashv << 3); \ + hashv ^= (hashv >> 11); \ + hashv += (hashv << 15); \ + bkt = hashv & (num_bkts-1U); \ +} while(0) + +#define HASH_JEN_MIX(a,b,c) \ +do { \ + a -= b; a -= c; a ^= ( c >> 13 ); \ + b -= c; b -= a; b ^= ( a << 8 ); \ + c -= a; c -= b; c ^= ( b >> 13 ); \ + a -= b; a -= c; a ^= ( c >> 12 ); \ + b -= c; b -= a; b ^= ( a << 16 ); \ + c -= a; c -= b; c ^= ( b >> 5 ); \ + a -= b; a -= c; a ^= ( c >> 3 ); \ + b -= c; b -= a; b ^= ( a << 10 ); \ + c -= a; c -= b; c ^= ( b >> 15 ); \ +} while (0) + +#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _hj_i,_hj_j,_hj_k; \ + unsigned const char *_hj_key=(unsigned const char*)(key); \ + hashv = 0xfeedbeefu; \ + _hj_i = _hj_j = 0x9e3779b9u; \ + _hj_k = (unsigned)(keylen); \ + while (_hj_k >= 12U) { \ + _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ + + ( (unsigned)_hj_key[2] << 16 ) \ + + ( (unsigned)_hj_key[3] << 24 ) ); \ + _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ + + ( (unsigned)_hj_key[6] << 16 ) \ + + ( (unsigned)_hj_key[7] << 24 ) ); \ + hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ + + ( (unsigned)_hj_key[10] << 16 ) \ + + ( (unsigned)_hj_key[11] << 24 ) ); \ + \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + \ + _hj_key += 12; \ + _hj_k -= 12U; \ + } \ + hashv += (unsigned)(keylen); \ + switch ( _hj_k ) { \ + case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ + case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ + case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ + case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ + case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ + case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ + case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ + case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ + case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \ + case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ + case 1: _hj_i += _hj_key[0]; \ + } \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + bkt = 
hashv & (num_bkts-1U); \ +} while(0) + +/* The Paul Hsieh hash function */ +#undef get16bits +#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ + || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) +#define get16bits(d) (*((const uint16_t *) (d))) +#endif + +#if !defined (get16bits) +#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ + +(uint32_t)(((const uint8_t *)(d))[0]) ) +#endif +#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned const char *_sfh_key=(unsigned const char*)(key); \ + uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \ + \ + unsigned _sfh_rem = _sfh_len & 3U; \ + _sfh_len >>= 2; \ + hashv = 0xcafebabeu; \ + \ + /* Main loop */ \ + for (;_sfh_len > 0U; _sfh_len--) { \ + hashv += get16bits (_sfh_key); \ + _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ + hashv = (hashv << 16) ^ _sfh_tmp; \ + _sfh_key += 2U*sizeof (uint16_t); \ + hashv += hashv >> 11; \ + } \ + \ + /* Handle end cases */ \ + switch (_sfh_rem) { \ + case 3: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 16; \ + hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ + hashv += hashv >> 11; \ + break; \ + case 2: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 11; \ + hashv += hashv >> 17; \ + break; \ + case 1: hashv += *_sfh_key; \ + hashv ^= hashv << 10; \ + hashv += hashv >> 1; \ + } \ + \ + /* Force "avalanching" of final 127 bits */ \ + hashv ^= hashv << 3; \ + hashv += hashv >> 5; \ + hashv ^= hashv << 4; \ + hashv += hashv >> 17; \ + hashv ^= hashv << 25; \ + hashv += hashv >> 6; \ + bkt = hashv & (num_bkts-1U); \ +} while(0) + +#ifdef HASH_USING_NO_STRICT_ALIASING +/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads. + * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. + * MurmurHash uses the faster approach only on CPU's where we know it's safe. + * + * Note the preprocessor built-in defines can be emitted using: + * + * gcc -m64 -dM -E - < /dev/null (on gcc) + * cc -## a.c (where a.c is a simple test file) (Sun Studio) + */ +#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86)) +#define MUR_GETBLOCK(p,i) p[i] +#else /* non intel */ +#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL) +#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL) +#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL) +#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL) +#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL)) +#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__)) +#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24)) +#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16)) +#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8)) +#else /* assume little endian non-intel */ +#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24)) +#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16)) +#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8)) +#endif +#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \ + (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \ + (MUR_PLUS2_ALIGNED(p) ? 
MUR_TWO_TWO(p) : \ + MUR_ONE_THREE(p)))) +#endif +#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +#define MUR_FMIX(_h) \ +do { \ + _h ^= _h >> 16; \ + _h *= 0x85ebca6bu; \ + _h ^= _h >> 13; \ + _h *= 0xc2b2ae35u; \ + _h ^= _h >> 16; \ +} while(0) + +#define HASH_MUR(key,keylen,num_bkts,hashv,bkt) \ +do { \ + const uint8_t *_mur_data = (const uint8_t*)(key); \ + const int _mur_nblocks = (int)(keylen) / 4; \ + uint32_t _mur_h1 = 0xf88D5353u; \ + uint32_t _mur_c1 = 0xcc9e2d51u; \ + uint32_t _mur_c2 = 0x1b873593u; \ + uint32_t _mur_k1 = 0; \ + const uint8_t *_mur_tail; \ + const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \ + int _mur_i; \ + for(_mur_i = -_mur_nblocks; _mur_i!=0; _mur_i++) { \ + _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \ + _mur_k1 *= _mur_c1; \ + _mur_k1 = MUR_ROTL32(_mur_k1,15); \ + _mur_k1 *= _mur_c2; \ + \ + _mur_h1 ^= _mur_k1; \ + _mur_h1 = MUR_ROTL32(_mur_h1,13); \ + _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \ + } \ + _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \ + _mur_k1=0; \ + switch((keylen) & 3U) { \ + case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \ + case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \ + case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \ + _mur_k1 *= _mur_c1; \ + _mur_k1 = MUR_ROTL32(_mur_k1,15); \ + _mur_k1 *= _mur_c2; \ + _mur_h1 ^= _mur_k1; \ + } \ + _mur_h1 ^= (uint32_t)(keylen); \ + MUR_FMIX(_mur_h1); \ + hashv = _mur_h1; \ + bkt = hashv & (num_bkts-1U); \ +} while(0) +#endif /* HASH_USING_NO_STRICT_ALIASING */ + +/* key comparison function; return 0 if keys equal */ +#define HASH_KEYCMP(a,b,len) memcmp(a,b,(unsigned long)(len)) + +/* iterate over items in a known bucket to find desired item */ +#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \ +do { \ + if (head.hh_head != NULL) { DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); } \ + else { out=NULL; } \ + while (out != NULL) { \ + if ((out)->hh.keylen == (keylen_in)) { \ + if ((HASH_KEYCMP((out)->hh.key,keyptr,keylen_in)) == 0) { break; } \ + } \ + if ((out)->hh.hh_next != NULL) { DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,(out)->hh.hh_next)); } \ + else { out = NULL; } \ + } \ +} while(0) + +/* add an item to a bucket */ +#define HASH_ADD_TO_BKT(head,addhh) \ +do { \ + head.count++; \ + (addhh)->hh_next = head.hh_head; \ + (addhh)->hh_prev = NULL; \ + if (head.hh_head != NULL) { (head).hh_head->hh_prev = (addhh); } \ + (head).hh_head=addhh; \ + if ((head.count >= ((head.expand_mult+1U) * HASH_BKT_CAPACITY_THRESH)) \ + && ((addhh)->tbl->noexpand != 1U)) { \ + HASH_EXPAND_BUCKETS((addhh)->tbl); \ + } \ +} while(0) + +/* remove an item from a given bucket */ +#define HASH_DEL_IN_BKT(hh,head,hh_del) \ + (head).count--; \ + if ((head).hh_head == hh_del) { \ + (head).hh_head = hh_del->hh_next; \ + } \ + if (hh_del->hh_prev) { \ + hh_del->hh_prev->hh_next = hh_del->hh_next; \ + } \ + if (hh_del->hh_next) { \ + hh_del->hh_next->hh_prev = hh_del->hh_prev; \ + } + +/* Bucket expansion has the effect of doubling the number of buckets + * and redistributing the items into the new buckets. Ideally the + * items will distribute more or less evenly into the new buckets + * (the extent to which this is true is a measure of the quality of + * the hash function as it applies to the key domain). + * + * With the items distributed into more buckets, the chain length + * (item count) in each bucket is reduced. Thus by expanding buckets + * the hash keeps a bound on the chain length. 
This bounded chain + * length is the essence of how a hash provides constant time lookup. + * + * The calculation of tbl->ideal_chain_maxlen below deserves some + * explanation. First, keep in mind that we're calculating the ideal + * maximum chain length based on the *new* (doubled) bucket count. + * In fractions this is just n/b (n=number of items,b=new num buckets). + * Since the ideal chain length is an integer, we want to calculate + * ceil(n/b). We don't depend on floating point arithmetic in this + * hash, so to calculate ceil(n/b) with integers we could write + * + * ceil(n/b) = (n/b) + ((n%b)?1:0) + * + * and in fact a previous version of this hash did just that. + * But now we have improved things a bit by recognizing that b is + * always a power of two. We keep its base 2 log handy (call it lb), + * so now we can write this with a bit shift and logical AND: + * + * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) + * + */ +#define HASH_EXPAND_BUCKETS(tbl) \ +do { \ + unsigned _he_bkt; \ + unsigned _he_bkt_i; \ + struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ + UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ + _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ + 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \ + memset(_he_new_buckets, 0, \ + 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + tbl->ideal_chain_maxlen = \ + (tbl->num_items >> (tbl->log2_num_buckets+1U)) + \ + (((tbl->num_items & ((tbl->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \ + tbl->nonideal_items = 0; \ + for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \ + { \ + _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \ + while (_he_thh != NULL) { \ + _he_hh_nxt = _he_thh->hh_next; \ + HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2U, _he_bkt); \ + _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \ + if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \ + tbl->nonideal_items++; \ + _he_newbkt->expand_mult = _he_newbkt->count / \ + tbl->ideal_chain_maxlen; \ + } \ + _he_thh->hh_prev = NULL; \ + _he_thh->hh_next = _he_newbkt->hh_head; \ + if (_he_newbkt->hh_head != NULL) { _he_newbkt->hh_head->hh_prev = \ + _he_thh; } \ + _he_newbkt->hh_head = _he_thh; \ + _he_thh = _he_hh_nxt; \ + } \ + } \ + uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ + tbl->num_buckets *= 2U; \ + tbl->log2_num_buckets++; \ + tbl->buckets = _he_new_buckets; \ + tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \ + (tbl->ineff_expands+1U) : 0U; \ + if (tbl->ineff_expands > 1U) { \ + tbl->noexpand=1; \ + uthash_noexpand_fyi(tbl); \ + } \ + uthash_expand_fyi(tbl); \ +} while(0) + + +/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ +/* Note that HASH_SORT assumes the hash handle name to be hh. + * HASH_SRT was added to allow the hash handle name to be passed in. 
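The ideal_chain_maxlen discussion above turns on the identity ceil(n/b) = (n >> lb) + ((n & (b-1)) ? 1 : 0) when b = 2^lb. A tiny self-contained check of that identity, illustrative only and not part of the patch:

#include <assert.h>

/* ceil(n / 2^lb) computed the same way HASH_EXPAND_BUCKETS does. */
static unsigned ceil_div_pow2(unsigned n, unsigned lb) {
  unsigned b = 1U << lb;
  return (n >> lb) + ((n & (b - 1U)) ? 1U : 0U);
}

int main(void) {
  assert(ceil_div_pow2(10, 2) == 3);   /* ceil(10/4)  = 3 */
  assert(ceil_div_pow2(64, 6) == 1);   /* ceil(64/64) = 1 */
  assert(ceil_div_pow2(65, 6) == 2);   /* ceil(65/64) = 2 */
  return 0;
}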
*/ +#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) +#define HASH_SRT(hh,head,cmpfcn) \ +do { \ + unsigned _hs_i; \ + unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ + struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ + if (head != NULL) { \ + _hs_insize = 1; \ + _hs_looping = 1; \ + _hs_list = &((head)->hh); \ + while (_hs_looping != 0U) { \ + _hs_p = _hs_list; \ + _hs_list = NULL; \ + _hs_tail = NULL; \ + _hs_nmerges = 0; \ + while (_hs_p != NULL) { \ + _hs_nmerges++; \ + _hs_q = _hs_p; \ + _hs_psize = 0; \ + for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \ + _hs_psize++; \ + _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + if (! (_hs_q) ) { break; } \ + } \ + _hs_qsize = _hs_insize; \ + while ((_hs_psize > 0U) || ((_hs_qsize > 0U) && (_hs_q != NULL))) {\ + if (_hs_psize == 0U) { \ + _hs_e = _hs_q; \ + _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_qsize--; \ + } else if ( (_hs_qsize == 0U) || (_hs_q == NULL) ) { \ + _hs_e = _hs_p; \ + if (_hs_p != NULL){ \ + _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \ + ((void*)((char*)(_hs_p->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + } \ + _hs_psize--; \ + } else if (( \ + cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \ + ) <= 0) { \ + _hs_e = _hs_p; \ + if (_hs_p != NULL){ \ + _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \ + ((void*)((char*)(_hs_p->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + } \ + _hs_psize--; \ + } else { \ + _hs_e = _hs_q; \ + _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_qsize--; \ + } \ + if ( _hs_tail != NULL ) { \ + _hs_tail->next = ((_hs_e != NULL) ? \ + ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \ + } else { \ + _hs_list = _hs_e; \ + } \ + if (_hs_e != NULL) { \ + _hs_e->prev = ((_hs_tail != NULL) ? \ + ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \ + } \ + _hs_tail = _hs_e; \ + } \ + _hs_p = _hs_q; \ + } \ + if (_hs_tail != NULL){ \ + _hs_tail->next = NULL; \ + } \ + if ( _hs_nmerges <= 1U ) { \ + _hs_looping=0; \ + (head)->hh.tbl->tail = _hs_tail; \ + DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ + } \ + _hs_insize *= 2U; \ + } \ + HASH_FSCK(hh,head); \ + } \ +} while (0) + +/* This function selects items from one hash into another hash. + * The end result is that the selected items have dual presence + * in both hashes. There is no copy of the items made; rather + * they are added into the new hash through a secondary hash + * hash handle that must be present in the structure. 
*/ +#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ +do { \ + unsigned _src_bkt, _dst_bkt; \ + void *_last_elt=NULL, *_elt; \ + UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ + ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ + if (src != NULL) { \ + for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ + for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ + _src_hh != NULL; \ + _src_hh = _src_hh->hh_next) { \ + _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ + if (cond(_elt)) { \ + _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \ + _dst_hh->key = _src_hh->key; \ + _dst_hh->keylen = _src_hh->keylen; \ + _dst_hh->hashv = _src_hh->hashv; \ + _dst_hh->prev = _last_elt; \ + _dst_hh->next = NULL; \ + if (_last_elt_hh != NULL) { _last_elt_hh->next = _elt; } \ + if (dst == NULL) { \ + DECLTYPE_ASSIGN(dst,_elt); \ + HASH_MAKE_TABLE(hh_dst,dst); \ + } else { \ + _dst_hh->tbl = (dst)->hh_dst.tbl; \ + } \ + HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ + HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \ + (dst)->hh_dst.tbl->num_items++; \ + _last_elt = _elt; \ + _last_elt_hh = _dst_hh; \ + } \ + } \ + } \ + } \ + HASH_FSCK(hh_dst,dst); \ +} while (0) + +#define HASH_CLEAR(hh,head) \ +do { \ + if (head != NULL) { \ + uthash_free((head)->hh.tbl->buckets, \ + (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + (head)=NULL; \ + } \ +} while(0) + +#define HASH_OVERHEAD(hh,head) \ + ((head != NULL) ? ( \ + (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ + ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ + sizeof(UT_hash_table) + \ + (HASH_BLOOM_BYTELEN))) : 0U) + +#ifdef NO_DECLTYPE +#define HASH_ITER(hh,head,el,tmp) \ +for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \ + (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL))) +#else +#define HASH_ITER(hh,head,el,tmp) \ +for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \ + (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL))) +#endif + +/* obtain a count of items in the hash */ +#define HASH_COUNT(head) HASH_CNT(hh,head) +#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U) + +typedef struct UT_hash_bucket { + struct UT_hash_handle *hh_head; + unsigned count; + + /* expand_mult is normally set to 0. In this situation, the max chain length + * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If + * the bucket's chain exceeds this length, bucket expansion is triggered). + * However, setting expand_mult to a non-zero value delays bucket expansion + * (that would be triggered by additions to this particular bucket) + * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. + * (The multiplier is simply expand_mult+1). The whole idea of this + * multiplier is to reduce bucket expansions, since they are expensive, in + * situations where we know that a particular bucket tends to be overused. + * It is better to let its chain length grow to a longer yet-still-bounded + * value, than to do an O(n) bucket expansion too often. 
+ */ + unsigned expand_mult; + +} UT_hash_bucket; + +/* random signature used only to find hash tables in external analysis */ +#define HASH_SIGNATURE 0xa0111fe1u +#define HASH_BLOOM_SIGNATURE 0xb12220f2u + +typedef struct UT_hash_table { + UT_hash_bucket *buckets; + unsigned num_buckets, log2_num_buckets; + unsigned num_items; + struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ + ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ + + /* in an ideal situation (all buckets used equally), no bucket would have + * more than ceil(#items/#buckets) items. that's the ideal chain length. */ + unsigned ideal_chain_maxlen; + + /* nonideal_items is the number of items in the hash whose chain position + * exceeds the ideal chain maxlen. these items pay the penalty for an uneven + * hash distribution; reaching them in a chain traversal takes >ideal steps */ + unsigned nonideal_items; + + /* ineffective expands occur when a bucket doubling was performed, but + * afterward, more than half the items in the hash had nonideal chain + * positions. If this happens on two consecutive expansions we inhibit any + * further expansion, as it's not helping; this happens when the hash + * function isn't a good fit for the key domain. When expansion is inhibited + * the hash will still work, albeit no longer in constant time. */ + unsigned ineff_expands, noexpand; + + uint32_t signature; /* used only to find hash tables in external analysis */ +#ifdef HASH_BLOOM + uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ + uint8_t *bloom_bv; + uint8_t bloom_nbits; +#endif + +} UT_hash_table; + +typedef struct UT_hash_handle { + struct UT_hash_table *tbl; + void *prev; /* prev element in app order */ + void *next; /* next element in app order */ + struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ + struct UT_hash_handle *hh_next; /* next hh in bucket order */ + void *key; /* ptr to enclosing struct's key */ + unsigned keylen; /* enclosing struct's key len */ + unsigned hashv; /* result of hash-fcn(key) */ +} UT_hash_handle; + +#endif /* UTHASH_H */ diff --git a/src/c/urweb.c b/src/c/urweb.c index 53344c5e..e0fd503c 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -22,6 +22,8 @@ #include "types.h" +#include "uthash.h" + uw_unit uw_unit_v = 0; @@ -4494,3 +4496,148 @@ int uw_remoteSock(uw_context ctx) { void uw_set_remoteSock(uw_context ctx, int sock) { ctx->remoteSock = sock; } + + +// Sqlcache + +void listDelete(CacheList *list, CacheEntry *entry) { + if (list->first == entry) { + list->first = entry->next; + } + if (list->last == entry) { + list->last = entry->prev; + } + if (entry->prev) { + entry->prev->next = entry->next; + } + if (entry->next) { + entry->next->prev = entry->prev; + } + entry->prev = NULL; + entry->next = NULL; + --(list->size); +} + +void listAdd(CacheList *list, CacheEntry *entry) { + if (list->last) { + list->last->next = entry; + entry->prev = list->last; + list->last = entry; + } else { + list->first = entry; + list->last = entry; + } + ++(list->size); +} + +void listBump(CacheList *list, CacheEntry *entry) { + listDelete(list, entry); + listAdd(list, entry); +} + +// TODO: deal with time properly. + +time_t getTimeNow() { + return time(NULL); +} + +time_t timeMax(time_t x, time_t y) { + return difftime(x, y) > 0 ? 
x : y; +} + +void freeCacheValue(CacheValue *value) { + if (value) { + free(value->result); + free(value->output); + free(value); + } +} + +void delete(Cache *cache, CacheEntry* entry) { + //listDelete(cache->lru, entry); + HASH_DELETE(hh, cache->table, entry); + freeCacheValue(entry->value); + free(entry->key); + free(entry); +} + +CacheValue *checkHelper(Cache *cache, char **keys, int timeInvalid) { + char *key = keys[cache->height]; + CacheEntry *entry; + HASH_FIND(hh, cache->table, key, strlen(key), entry); + timeInvalid = timeMax(timeInvalid, cache->timeInvalid); + if (entry && difftime(entry->timeValid, timeInvalid) > 0) { + if (cache->height == 0) { + // At height 0, entry->value is the desired value. + //listBump(cache->lru, entry); + return entry->value; + } else { + // At height n+1, entry->value is a pointer to a cache at heignt n. + return checkHelper(entry->value, keys, timeInvalid); + } + } else { + return NULL; + } +} + +CacheValue *check(Cache *cache, char **keys) { + return checkHelper(cache, keys, 0); +} + +void storeHelper(Cache *cache, char **keys, CacheValue *value, int timeNow) { + CacheEntry *entry; + char *key = keys[cache->height]; + HASH_FIND(hh, cache->table, key, strlen(key), entry); + if (!entry) { + entry = malloc(sizeof(CacheEntry)); + entry->key = strdup(key); + entry->value = NULL; + HASH_ADD_KEYPTR(hh, cache->table, entry->key, strlen(entry->key), entry); + } + entry->timeValid = timeNow; + if (cache->height == 0) { + //listAdd(cache->lru, entry); + freeCacheValue(entry->value); + entry->value = value; + //if (cache->lru->size > MAX_SIZE) { + //delete(cache, cache->lru->first); + // TODO: return flushed value. + //} + } else { + if (!entry->value) { + Cache *newCache = malloc(sizeof(Cache)); + newCache->table = NULL; + newCache->timeInvalid = timeNow; + newCache->lru = cache->lru; + newCache->height = cache->height - 1; + entry->value = newCache; + } + storeHelper(entry->value, keys, value, timeNow); + } +} + +void store(Cache *cache, char **keys, CacheValue *value) { + storeHelper(cache, keys, value, getTimeNow()); +} + +void flushHelper(Cache *cache, char **keys, int timeNow) { + CacheEntry *entry; + char *key = keys[cache->height]; + if (key) { + HASH_FIND(hh, cache->table, key, strlen(key), entry); + if (entry) { + if (cache->height == 0) { + delete(cache, entry); + } else { + flushHelper(entry->value, keys, timeNow); + } + } + } else { + // Null key means invalidate the entire subtree. 
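Taken together, check/store/flush implement a tree of string-keyed hash tables, one level per cache key, with the timeValid/timeInvalid timestamps doing the invalidation work; checkHelper consumes keys[cache->height] at the top level and works down to keys[0]. A rough usage sketch from the C side follows, assuming the declarations added in types_cpp.h and urweb_cpp.h; the two-level shape and the key strings are invented for illustration.

#include <stdlib.h>
#include <string.h>
/* assumes Cache, CacheValue, check, store, flush from types_cpp.h / urweb_cpp.h */

static Cache exampleCache = {.table = NULL, .timeInvalid = 0, .lru = NULL, .height = 1};

static void exampleRoundTrip(void) {
  char *ks[] = {"7", "42"};            /* ks[1] is consumed first (height 1), then ks[0] */
  CacheValue *hit = check(&exampleCache, ks);
  if (hit == NULL) {                   /* miss: compute the value, then cache it */
    CacheValue *v = malloc(sizeof(CacheValue));
    v->result = strdup("result");      /* both fields are later freed by freeCacheValue */
    v->output = strdup("rendered page");
    store(&exampleCache, ks, v);
  }
  flush(&exampleCache, ks);            /* drops this entry; a NULL key instead flushes a whole subtree */
}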
+ cache->timeInvalid = timeNow; + } +} + +void flush(Cache *cache, char **keys) { + flushHelper(cache, keys, getTimeNow()); +} diff --git a/src/lru_cache.sml b/src/lru_cache.sml new file mode 100644 index 00000000..87e939fa --- /dev/null +++ b/src/lru_cache.sml @@ -0,0 +1,171 @@ +structure LruCache : sig + val cache : Cache.cache +end = struct + + +(* Mono *) + +open Mono + +val dummyLoc = ErrorMsg.dummySpan +val stringTyp = (TFfi ("Basis", "string"), dummyLoc) +val optionStringTyp = (TOption stringTyp, dummyLoc) +fun withTyp typ = map (fn exp => (exp, typ)) + +fun ffiAppCache' (func, index, argTyps) = + EFfiApp ("Sqlcache", func ^ Int.toString index, argTyps) + +fun check (index, keys) = + ffiAppCache' ("check", index, withTyp stringTyp keys) + +fun store (index, keys, value) = + ffiAppCache' ("store", index, (value, stringTyp) :: withTyp stringTyp keys) + +fun flush (index, keys) = + ffiAppCache' ("flush", index, withTyp optionStringTyp keys) + + +(* Cjr *) + +open Print +open Print.PD + +fun setupQuery {index, params} = + let + + val i = Int.toString index + + fun paramRepeat itemi sep = + let + fun f n = + if n < 0 then "" + else if n = 0 then itemi (Int.toString 0) + else f (n-1) ^ sep ^ itemi (Int.toString n) + in + f (params - 1) + end + + fun paramRepeatRev itemi sep = + let + fun f n = + if n < 0 then "" + else if n = 0 then itemi (Int.toString 0) + else itemi (Int.toString n) ^ sep ^ f (n-1) + in + f (params - 1) + end + + fun paramRepeatInit itemi sep = + if params = 0 then "" else sep ^ paramRepeat itemi sep + + val typedArgs = paramRepeatInit (fn p => "uw_Basis_string p" ^ p) ", " + + val revArgs = paramRepeatRev (fn p => "p" ^ p) ", " + + in + Print.box + [string ("static Cache cacheStruct" ^ i ^ " = {"), + newline, + string " .table = NULL,", + newline, + string " .timeInvalid = 0,", + newline, + string " .lru = NULL,", + newline, + string (" .height = " ^ Int.toString (params - 1) ^ "};"), + newline, + string ("static Cache *cache" ^ i ^ " = &cacheStruct" ^ i ^ ";"), + newline, + newline, + + string ("static uw_Basis_string uw_Sqlcache_check" ^ i), + string ("(uw_context ctx" ^ typedArgs ^ ") {"), + newline, + string (" char *ks[] = {" ^ revArgs ^ "};"), + newline, + string (" CacheValue *v = check(cache" ^ i ^ ", ks);"), + newline, + string " if (v) {", + newline, + string (" puts(\"SQLCACHE: hit " ^ i ^ ".\");"), + newline, + string " uw_write(ctx, v->output);", + newline, + string " return v->result;", + newline, + string " } else {", + newline, + string (" puts(\"SQLCACHE: miss " ^ i ^ ".\");"), + newline, + string " uw_recordingStart(ctx);", + newline, + string " return NULL;", + newline, + string " }", + newline, + string "}", + newline, + newline, + + string ("static uw_unit uw_Sqlcache_store" ^ i), + string ("(uw_context ctx, uw_Basis_string s" ^ typedArgs ^ ") {"), + newline, + string (" char *ks[] = {" ^ revArgs ^ "};"), + newline, + string (" CacheValue *v = malloc(sizeof(CacheValue));"), + newline, + string " v->result = strdup(s);", + newline, + string " v->output = uw_recordingRead(ctx);", + newline, + string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), + newline, + string (" store(cache" ^ i ^ ", ks, v);"), + newline, + string " return uw_unit_v;", + newline, + string "}", + newline, + newline, + + string ("static uw_unit uw_Sqlcache_flush" ^ i), + string ("(uw_context ctx" ^ typedArgs ^ ") {"), + newline, + string (" char *ks[] = {" ^ revArgs ^ "};"), + newline, + string (" flush(cache" ^ i ^ ", ks);"), + newline, + string " return uw_unit_v;", + newline, + 
string "}", + newline, + newline] + end + +val setupGlobal = string "/* No global setup for LRU cache. */" + + +(* Bundled up. *) + +(* For now, use the toy implementation if there are no arguments. *) +fun toyIfNoKeys numKeys implLru implToy args = + if numKeys args = 0 + then implToy args + else implLru args + +val cache = + let + val {check = toyCheck, + store = toyStore, + flush = toyFlush, + setupQuery = toySetupQuery, + ...} = ToyCache.cache + in + {check = toyIfNoKeys (length o #2) check toyCheck, + store = toyIfNoKeys (length o #2) store toyStore, + flush = toyIfNoKeys (length o #2) flush toyFlush, + setupQuery = toyIfNoKeys #params setupQuery toySetupQuery, + setupGlobal = setupGlobal} + end + +end diff --git a/src/sources b/src/sources index aaf640ca..0608d710 100644 --- a/src/sources +++ b/src/sources @@ -177,6 +177,7 @@ $(SRC)/multimap_fn.sml $(SRC)/cache.sml $(SRC)/toy_cache.sml +$(SRC)/lru_cache.sml $(SRC)/sqlcache.sig $(SRC)/sqlcache.sml diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 3082904c..bf9ee77a 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -39,7 +39,7 @@ val ffiEffectful = andalso not (m = "Basis" andalso SS.member (fs, f)) end -val cache = ref ToyCache.cache +val cache = ref LruCache.cache fun setCache c = cache := c fun getCache () = !cache @@ -52,8 +52,8 @@ fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : exp -> false, then expression is definitely not effectful if effs is fully populated. The intended pattern is to use this a number of times equal to the number of declarations in a file, Bellman-Ford style. *) - (* TODO: make incrementing of bound less janky, probably by using [MonoUtil] - instead of all this. *) + (* TODO: make incrementing of the number of bound variables cleaner, + probably by using [MonoUtil] instead of all this. *) let (* DEBUG: remove printing when done. *) fun tru msg = if doPrint then (print (msg ^ "\n"); true) else true @@ -138,14 +138,14 @@ val effectfulMap = (* Boolean formula normalization. 
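For orientation, the setupQuery printer in lru_cache.sml above should emit C along roughly these lines for index = 0 and params = 2; this is reconstructed by hand from the printer, so spelling and spacing are approximate rather than verbatim compiler output.

static Cache cacheStruct0 = {
  .table = NULL,
  .timeInvalid = 0,
  .lru = NULL,
  .height = 1};
static Cache *cache0 = &cacheStruct0;

static uw_Basis_string uw_Sqlcache_check0(uw_context ctx, uw_Basis_string p0, uw_Basis_string p1) {
  char *ks[] = {p1, p0};
  CacheValue *v = check(cache0, ks);
  if (v) {
    puts("SQLCACHE: hit 0.");
    uw_write(ctx, v->output);
    return v->result;
  } else {
    puts("SQLCACHE: miss 0.");
    uw_recordingStart(ctx);
    return NULL;
  }
}

/* uw_Sqlcache_store0 and uw_Sqlcache_flush0 are printed the same way,
   wrapping store(cache0, ks, v) and flush(cache0, ks). */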
*) -datatype normalForm = Cnf | Dnf +datatype junctionType = Conj | Disj datatype 'atom formula = Atom of 'atom | Negate of 'atom formula - | Combo of normalForm * 'atom formula list + | Combo of junctionType * 'atom formula list -val flipNf = fn Cnf => Dnf | Dnf => Cnf +val flipJt = fn Conj => Disj | Disj => Conj fun bind xs f = List.concat (map f xs) @@ -158,7 +158,7 @@ val rec cartesianProduct : 'a list list -> 'a list list = fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = fn Atom x => Atom (if negating then negate x else x) | Negate f => pushNegate negate (not negating) f - | Combo (n, fs) => Combo (if negating then flipNf n else n, map (pushNegate negate negating) fs) + | Combo (n, fs) => Combo (if negating then flipJt n else n, map (pushNegate negate negating) fs) val rec flatten = fn Combo (n, fs) => @@ -170,17 +170,17 @@ val rec flatten = (map flatten fs)) | f => f -fun normalize' (negate : 'atom -> 'atom) (norm : normalForm) = +fun normalize' (negate : 'atom -> 'atom) (junc : junctionType) = fn Atom x => [[x]] - | Negate f => map (map negate) (normalize' negate (flipNf norm) f) - | Combo (n, fs) => + | Negate f => map (map negate) (normalize' negate (flipJt junc) f) + | Combo (j, fs) => let - val fss = bind fs (normalize' negate n) + val fss = bind fs (normalize' negate j) in - if n = norm then fss else cartesianProduct fss + if j = junc then fss else cartesianProduct fss end -fun normalize negate norm = normalize' negate norm o flatten o pushNegate negate false +fun normalize negate junc = normalize' negate junc o flatten o pushNegate negate false fun mapFormula mf = fn Atom x => Atom (mf x) @@ -200,36 +200,29 @@ datatype atomExp = | Prim of Prim.t | Field of string * string -val equalAtomExp = - let - val isEqual = fn EQUAL => true | _ => false - in - fn (QueryArg n1, QueryArg n2) => n1 = n2 - | (DmlRel n1, DmlRel n2) => n1 = n2 - | (Prim p1, Prim p2) => isEqual (Prim.compare (p1, p2)) - | (Field (t1, f1), Field (t2, f2)) => isEqual (String.compare (t1 ^ "." ^ f1, t2 ^ "." ^ f2)) - | _ => false - end - structure AtomExpKey : ORD_KEY = struct -type ord_key = atomExp - -val compare = - fn (QueryArg n1, QueryArg n2) => Int.compare (n1, n2) - | (QueryArg _, _) => LESS - | (_, QueryArg _) => GREATER - | (DmlRel n1, DmlRel n2) => Int.compare (n1, n2) - | (DmlRel _, _) => LESS - | (_, DmlRel _) => GREATER - | (Prim p1, Prim p2) => Prim.compare (p1, p2) - | (Prim _, _) => LESS - | (_, Prim _) => GREATER - | (Field (t1, f1), Field (t2, f2)) => String.compare (t1 ^ "." ^ f1, t2 ^ "." ^ f2) + type ord_key = atomExp + + val compare = + fn (QueryArg n1, QueryArg n2) => Int.compare (n1, n2) + | (QueryArg _, _) => LESS + | (_, QueryArg _) => GREATER + | (DmlRel n1, DmlRel n2) => Int.compare (n1, n2) + | (DmlRel _, _) => LESS + | (_, DmlRel _) => GREATER + | (Prim p1, Prim p2) => Prim.compare (p1, p2) + | (Prim _, _) => LESS + | (_, Prim _) => GREATER + | (Field (t1, f1), Field (t2, f2)) => + case String.compare (t1, t2) of + EQUAL => String.compare (f1, f2) + | ord => ord end structure UF = UnionFindFn(AtomExpKey) + val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> atomExp IM.map list = @@ -246,7 +239,9 @@ val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula o List.mapPartial toKnownEquality fun addToEqs (eqs, n, e) = case IM.find (eqs, n) of - (* Comparing to a constant seems better? *) + (* Comparing to a constant is probably better than comparing to + a variable? 
Checking that an existing constant matches a new + one is handled by [accumulateEqs]. *) SOME (Prim _) => eqs | _ => IM.insert (eqs, n, e) val accumulateEqs = @@ -263,6 +258,9 @@ val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula (* TODO: deal with equalities involving just [DmlRel]s and [Prim]s. This would involve guarding the invalidation with a check for the relevant comparisons. *) + (* DEBUG: remove these print statements. *) + (* | ((DmlRel r, Prim p), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) + (* | ((Prim p, DmlRel r), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) | (_, eqso) => eqso val eqsOfClass : atomExp list -> atomExp IM.map option = List.foldl accumulateEqs (SOME IM.empty) @@ -275,7 +273,8 @@ val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula | Sql.Field tf => SOME (Field tf) | Sql.Inj (EPrim p, _) => SOME (Prim p) | Sql.Inj (ERel n, _) => SOME (rel n) - (* We can't deal with anything else. *) + (* We can't deal with anything else, e.g., CURRENT_TIMESTAMP + becomes Sql.Unmodeled, which becomes NONE here. *) | _ => NONE in (cmp, qa e1, qa e2) @@ -302,17 +301,17 @@ val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) (SOME IM.empty) fun dnf (fQuery, fDml) = - normalize negateCmp Dnf (Combo (Cnf, [markQuery fQuery, markDml fDml])) + normalize negateCmp Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) in List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf end val rec sqexpToFormula = - fn Sql.SqTrue => Combo (Cnf, []) - | Sql.SqFalse => Combo (Dnf, []) + fn Sql.SqTrue => Combo (Conj, []) + | Sql.SqFalse => Combo (Disj, []) | Sql.SqNot e => Negate (sqexpToFormula e) | Sql.Binop (Sql.RCmp c, e1, e2) => Atom (c, e1, e2) - | Sql.Binop (Sql.RLop l, p1, p2) => Combo (case l of Sql.And => Cnf | Sql.Or => Dnf, + | Sql.Binop (Sql.RLop l, p1, p2) => Combo (case l of Sql.And => Conj | Sql.Or => Disj, [sqexpToFormula p1, sqexpToFormula p2]) (* ASK: any other sqexps that can be props? *) | _ => raise Match @@ -332,13 +331,13 @@ fun renameTables tablePairs = end val rec queryToFormula = - fn Sql.Query1 {Where = NONE, ...} => Combo (Cnf, []) + fn Sql.Query1 {Where = NONE, ...} => Combo (Conj, []) | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => renameTables tablePairs (sqexpToFormula e) - | Sql.Union (q1, q2) => Combo (Dnf, [queryToFormula q1, queryToFormula q2]) + | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula q1, queryToFormula q2]) fun valsToFormula (table, vals) = - Combo (Cnf, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) + Combo (Conj, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) val rec dmlToFormula = fn Sql.Insert (table, vals) => valsToFormula (table, vals) @@ -354,8 +353,8 @@ val rec dmlToFormula = val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) in renameTables [(table, "T")] - (Combo (Dnf, [Combo (Cnf, [fVals, mark fWhere]), - Combo (Cnf, [mark fVals, fWhere])])) + (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), + Combo (Conj, [mark fVals, fWhere])])) end val rec tablesQuery = @@ -370,6 +369,13 @@ val tableDml = (* Program instrumentation. 
*) +val varName = + let + val varNumber = ref 0 + in + fn s => (varNumber := !varNumber + 1; s ^ Int.toString (!varNumber)) + end + val {check, store, flush, ...} = getCache () val dummyLoc = ErrorMsg.dummySpan @@ -412,8 +418,8 @@ fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = in ECase (check, [((PNone stringTyp, loc), - (ELet ("q", resultTyp, query, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar ("hit", stringTyp), loc)), loc), + (ELet (varName "q", resultTyp, query, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), (* Boolean is false because we're not unurlifying from a cookie. *) (EUnurlify (rel0, resultTyp, false), loc))], {disc = stringTyp, result = resultTyp}) @@ -454,7 +460,7 @@ fun factorOutNontrivial text = chunks fun wrapLets e' = (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet ("sqlArg", stringTyp, v, (e', loc))) + List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) e' newVariables val numArgs = length newVariables @@ -482,6 +488,7 @@ fun addChecking file = exps = exps}, dummyLoc) val (EQuery {query = queryText, ...}, _) = queryExp + (* DEBUG: we can remove the following line at some point. *) val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)) val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) fun bind x f = Option.mapPartial f x @@ -530,9 +537,11 @@ fun invalidations ((query, numArgs), dml) = (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here represents unknown, which means a wider invalidation. *) val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = - fn ([], []) => true + fn ([], []) => (print "hey!\n"; true) | (NONE :: xs, _ :: ys) => madeRedundantBy (xs, ys) - | (SOME x :: xs, SOME y :: ys) => equalAtomExp (x, y) andalso madeRedundantBy (xs, ys) + | (SOME x :: xs, SOME y :: ys) => (case AtomExpKey.compare (x, y) of + EQUAL => madeRedundantBy (xs, ys) + | _ => false) | _ => false fun removeRedundant' (xss, yss) = case xss of -- cgit v1.2.3 From aa2da68f6bfc3649fcb43afa1b88909ef278ac60 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 29 Jun 2015 01:33:47 -0700 Subject: Refactored a lot and fixed an and/or swap, but still not good on current test. --- caching-tests/test.ur | 42 +++-- src/sql.sml | 4 +- src/sqlcache.sml | 426 +++++++++++++++++++++++++++++++++----------------- 3 files changed, 306 insertions(+), 166 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 842fd77d..ba3a337d 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -11,29 +11,27 @@ fun cache id = | Some row => {[row.Tab.Val]}} + fun flush id = - res <- oneOrNoRows (SELECT tab.Val - FROM tab - WHERE tab.Id = {[id]}); - (case res of - None => return () (* dml (INSERT INTO tab (Id, Val) *) - (* VALUES ({[id]}, 0)) *) - | Some row => dml (UPDATE tab - SET Val = {[row.Tab.Val + 1]} - WHERE Id = {[id + 1]} OR Id = {[id]} (* OR Id = {[id - 1]} *))); + dml (UPDATE tab + SET Val = 42 + WHERE Id = {[id]} OR Id = {[id + 1]}); return - {case res of - None => Initialized {[id]}! - | Some row => Incremented {[id]}!} + Changed {[id]}! 
-(* task periodic 5 = *) -(* fn () => *) -(* t <- now; *) -(* let *) -(* val n = toSeconds t % 2 *) -(* in *) -(* dml (UPDATE tab *) -(* SET Val = 9001 *) -(* WHERE Id = {[n]} OR Id = {[n+1]}) *) -(* end *) +(* fun flush id = *) +(* res <- oneOrNoRows (SELECT tab.Val *) +(* FROM tab *) +(* WHERE tab.Id = {[id]}); *) +(* (case res of *) +(* None => dml (INSERT INTO tab (Id, Val) *) +(* VALUES ({[id]}, 0)) *) +(* | Some row => dml (UPDATE tab *) +(* SET Val = {[row.Tab.Val + 1]} *) +(* WHERE Id = {[id]} OR Id = {[id + 1]})); *) +(* return *) +(* {case res of *) +(* None => Initialized {[id]}! *) +(* | Some row => Incremented {[id]}!} *) +(* *) diff --git a/src/sql.sml b/src/sql.sml index 59b4eac6..22ffea39 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -214,8 +214,8 @@ val sqbrel = altL [cmp "=" Eq, cmp "<" Lt, cmp ">=" Ge, cmp ">" Gt, - wrap (const "AND") (fn () => RLop Or), - wrap (const "OR") (fn () => RLop And)] + wrap (const "AND") (fn () => RLop And), + wrap (const "OR") (fn () => RLop Or)] datatype ('a, 'b) sum = inl of 'a | inr of 'b diff --git a/src/sqlcache.sml b/src/sqlcache.sml index bf9ee77a..b259f2cb 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache :> SQLCACHE = struct +structure Sqlcache (* DEBUG: add back :> SQLCACHE. *) = struct open Mono @@ -147,12 +147,12 @@ datatype 'atom formula = val flipJt = fn Conj => Disj | Disj => Conj -fun bind xs f = List.concat (map f xs) +fun listBind xs f = List.concat (map f xs) val rec cartesianProduct : 'a list list -> 'a list list = fn [] => [[]] - | (xs :: xss) => bind (cartesianProduct xss) - (fn ys => bind xs (fn x => [x :: ys])) + | (xs :: xss) => listBind (cartesianProduct xss) + (fn ys => listBind xs (fn x => [x :: ys])) (* Pushes all negation to the atoms.*) fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = @@ -161,35 +161,123 @@ fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = | Combo (n, fs) => Combo (if negating then flipJt n else n, map (pushNegate negate negating) fs) val rec flatten = - fn Combo (n, fs) => - Combo (n, List.foldr (fn (f, acc) => + fn Combo (_, [f]) => flatten f + | Combo (j, fs) => + Combo (j, List.foldr (fn (f, acc) => case f of - Combo (n', fs') => if n = n' then fs' @ acc else f :: acc + Combo (j', fs') => + if j = j' orelse length fs' = 1 + then fs' @ acc + else f :: acc | _ => f :: acc) [] (map flatten fs)) | f => f -fun normalize' (negate : 'atom -> 'atom) (junc : junctionType) = - fn Atom x => [[x]] - | Negate f => map (map negate) (normalize' negate (flipJt junc) f) - | Combo (j, fs) => +fun normalize' ((simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negate) + : ('a list list -> 'a list list) + * ('a list -> 'a list) + * ('a list -> 'a list) + * ('a -> 'a)) + (junc : junctionType) = let - val fss = bind fs (normalize' negate j) + fun simplify junc = simplifyLists o map (case junc of + Conj => simplifyAtomsConj + | Disj => simplifyAtomsDisj) + fun norm junc = + simplify junc + o (fn Atom x => [[x]] + | Negate f => map (map negate) (norm (flipJt junc) f) + | Combo (j, fs) => + let + val fss = listBind fs (norm j) + in + if j = junc then fss else cartesianProduct fss + end) in - if j = junc then fss else cartesianProduct fss + norm junc end -fun normalize negate junc = normalize' negate junc o flatten o pushNegate negate false +fun normalize (simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negate, junc) = + (normalize' (simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negate) junc) + o flatten + o pushNegate negate false fun 
mapFormula mf = fn Atom x => Atom (mf x) | Negate f => Negate (mapFormula mf f) - | Combo (n, fs) => Combo (n, map (mapFormula mf) fs) + | Combo (j, fs) => Combo (j, map (mapFormula mf) fs) (* SQL analysis. *) +structure CmpKey : ORD_KEY = struct + + type ord_key = Sql.cmp + + val compare = + fn (Sql.Eq, Sql.Eq) => EQUAL + | (Sql.Eq, _) => LESS + | (_, Sql.Eq) => GREATER + | (Sql.Ne, Sql.Ne) => EQUAL + | (Sql.Ne, _) => LESS + | (_, Sql.Ne) => GREATER + | (Sql.Lt, Sql.Lt) => EQUAL + | (Sql.Lt, _) => LESS + | (_, Sql.Lt) => GREATER + | (Sql.Le, Sql.Le) => EQUAL + | (Sql.Le, _) => LESS + | (_, Sql.Le) => GREATER + | (Sql.Gt, Sql.Gt) => EQUAL + | (Sql.Gt, _) => LESS + | (_, Sql.Gt) => GREATER + | (Sql.Ge, Sql.Ge) => EQUAL + +end + + +functor ListKeyFn (K : ORD_KEY) : ORD_KEY = struct + + type ord_key = K.ord_key list + + val rec compare = + fn ([], []) => EQUAL + | ([], _) => LESS + | (_, []) => GREATER + | (x :: xs, y :: ys) => (case K.compare (x, y) of + EQUAL => compare (xs, ys) + | ord => ord) + +end + +functor OptionKeyFn (K : ORD_KEY) : ORD_KEY = struct + + type ord_key = K.ord_key option + + val compare = + fn (NONE, NONE) => EQUAL + | (NONE, _) => LESS + | (_, NONE) => GREATER + | (SOME x, SOME y) => K.compare (x, y) + +end + +functor TripleKeyFn (structure I : ORD_KEY + structure J : ORD_KEY + structure K : ORD_KEY) + : ORD_KEY where type ord_key = I.ord_key * J.ord_key * K.ord_key = struct + + type ord_key = I.ord_key * J.ord_key * K.ord_key + + fun compare ((i1, j1, k1), (i2, j2, k2)) = + case I.compare (i1, i2) of + EQUAL => (case J.compare (j1, j2) of + EQUAL => K.compare (k1, k2) + | ord => ord) + | ord => ord + +end + val rec chooseTwos : 'a list -> ('a * 'a) list = fn [] => [] | x :: ys => map (fn y => (x, y)) ys @ chooseTwos ys @@ -223,88 +311,121 @@ end structure UF = UnionFindFn(AtomExpKey) -val conflictMaps : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula - * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula - -> atomExp IM.map list = - let - val toKnownEquality = - (* [NONE] here means unkown. Anything that isn't a comparison between - two knowns shouldn't be used, and simply dropping unused terms is - okay in disjunctive normal form. *) - fn (Sql.Eq, SOME e1, SOME e2) => SOME (e1, e2) - | _ => NONE - val equivClasses : (Sql.cmp * atomExp option * atomExp option) list -> atomExp list list = - UF.classes - o List.foldl UF.union' UF.empty - o List.mapPartial toKnownEquality - fun addToEqs (eqs, n, e) = - case IM.find (eqs, n) of - (* Comparing to a constant is probably better than comparing to - a variable? Checking that an existing constant matches a new - one is handled by [accumulateEqs]. *) - SOME (Prim _) => eqs - | _ => IM.insert (eqs, n, e) - val accumulateEqs = - (* [NONE] means we have a contradiction. *) - fn (_, NONE) => NONE - | ((Prim p1, Prim p2), eqso) => - (case Prim.compare (p1, p2) of - EQUAL => eqso - | _ => NONE) - | ((QueryArg n, Prim p), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) - | ((QueryArg n, DmlRel r), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) - | ((Prim p, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) - | ((DmlRel r, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) - (* TODO: deal with equalities involving just [DmlRel]s and [Prim]s. - This would involve guarding the invalidation with a check for the - relevant comparisons. *) - (* DEBUG: remove these print statements. 
*) - (* | ((DmlRel r, Prim p), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) - (* | ((Prim p, DmlRel r), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) - | (_, eqso) => eqso - val eqsOfClass : atomExp list -> atomExp IM.map option = - List.foldl accumulateEqs (SOME IM.empty) - o chooseTwos - fun toAtomExps rel (cmp, e1, e2) = - let - val qa = - (* Here [NONE] means unkown. *) - fn Sql.SqConst p => SOME (Prim p) - | Sql.Field tf => SOME (Field tf) - | Sql.Inj (EPrim p, _) => SOME (Prim p) - | Sql.Inj (ERel n, _) => SOME (rel n) - (* We can't deal with anything else, e.g., CURRENT_TIMESTAMP - becomes Sql.Unmodeled, which becomes NONE here. *) - | _ => NONE - in - (cmp, qa e1, qa e2) - end - fun negateCmp (cmp, e1, e2) = - (case cmp of - Sql.Eq => Sql.Ne - | Sql.Ne => Sql.Eq - | Sql.Lt => Sql.Ge - | Sql.Le => Sql.Gt - | Sql.Gt => Sql.Le - | Sql.Ge => Sql.Lt, - e1, e2) - val markQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> - (Sql.cmp * atomExp option * atomExp option) formula = - mapFormula (toAtomExps QueryArg) - val markDml : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> - (Sql.cmp * atomExp option * atomExp option) formula = - mapFormula (toAtomExps DmlRel) - (* No eqs should have key conflicts because no variable is in two - equivalence classes, so the [#1] can be anything. *) - val mergeEqs : (atomExp IntBinaryMap.map option list - -> atomExp IntBinaryMap.map option) = - List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) - (SOME IM.empty) - fun dnf (fQuery, fDml) = - normalize negateCmp Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) - in - List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf - end +structure ConflictMaps = struct + + structure TK = TripleKeyFn(structure I = CmpKey + structure J = OptionKeyFn(AtomExpKey) + structure K = OptionKeyFn(AtomExpKey)) + structure TS = BinarySetFn(TK) + structure TLS = BinarySetFn(ListKeyFn(TK)) + + val toKnownEquality = + (* [NONE] here means unkown. Anything that isn't a comparison between two + knowns shouldn't be used, and simply dropping unused terms is okay in + disjunctive normal form. *) + fn (Sql.Eq, SOME e1, SOME e2) => SOME (e1, e2) + | _ => NONE + + val equivClasses : (Sql.cmp * atomExp option * atomExp option) list -> atomExp list list = + UF.classes + o List.foldl UF.union' UF.empty + o List.mapPartial toKnownEquality + + fun addToEqs (eqs, n, e) = + case IM.find (eqs, n) of + (* Comparing to a constant is probably better than comparing to a + variable? Checking that existing constants match a new ones is + handled by [accumulateEqs]. *) + SOME (Prim _) => eqs + | _ => IM.insert (eqs, n, e) + + val accumulateEqs = + (* [NONE] means we have a contradiction. *) + fn (_, NONE) => NONE + | ((Prim p1, Prim p2), eqso) => + (case Prim.compare (p1, p2) of + EQUAL => eqso + | _ => NONE) + | ((QueryArg n, Prim p), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) + | ((QueryArg n, DmlRel r), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) + | ((Prim p, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, Prim p)) + | ((DmlRel r, QueryArg n), SOME eqs) => SOME (addToEqs (eqs, n, DmlRel r)) + (* TODO: deal with equalities between [DmlRel]s and [Prim]s. + This would involve guarding the invalidation with a check for the + relevant comparisons. *) + (* DEBUG: remove these print statements. 
*) + (* | ((DmlRel r, Prim p), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) + (* | ((Prim p, DmlRel r), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) + | (_, eqso) => eqso + + val eqsOfClass : atomExp list -> atomExp IM.map option = + List.foldl accumulateEqs (SOME IM.empty) + o chooseTwos + + fun toAtomExps rel (cmp, e1, e2) = + let + val qa = + (* Here [NONE] means unkown. *) + fn Sql.SqConst p => SOME (Prim p) + | Sql.Field tf => SOME (Field tf) + | Sql.Inj (EPrim p, _) => SOME (Prim p) + | Sql.Inj (ERel n, _) => SOME (rel n) + (* We can't deal with anything else, e.g., CURRENT_TIMESTAMP + becomes Sql.Unmodeled, which becomes NONE here. *) + | _ => NONE + in + (cmp, qa e1, qa e2) + end + + fun negateCmp (cmp, e1, e2) = + (case cmp of + Sql.Eq => Sql.Ne + | Sql.Ne => Sql.Eq + | Sql.Lt => Sql.Ge + | Sql.Le => Sql.Gt + | Sql.Gt => Sql.Le + | Sql.Ge => Sql.Lt, + e1, e2) + + val markQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> + (Sql.cmp * atomExp option * atomExp option) formula = + mapFormula (toAtomExps QueryArg) + + val markDml : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> + (Sql.cmp * atomExp option * atomExp option) formula = + mapFormula (toAtomExps DmlRel) + (* No eqs should have key conflicts because no variable is in two + equivalence classes, so the [#1] could be [#2]. *) + + val mergeEqs : (atomExp IntBinaryMap.map option list + -> atomExp IntBinaryMap.map option) = + List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) + (SOME IM.empty) + + fun dnf (fQuery, fDml) = + let + val isStar = + (* TODO: decide if this is okay and, if so, factor out magic + string "*" to a common location. *) + (* First guess: definitely okay for conservative approximation, + though information lost might be useful even in current + implementation for finding an extra equality. *) + fn SOME (Field (_, field)) => String.isSuffix "*" field + | _ => false + fun canIgnore (_, a1, a2) = isStar a1 orelse isStar a2 + fun simplifyLists xs = TLS.listItems (TLS.addList (TLS.empty, xs)) + fun simplifyAtomsConj xs = TS.listItems (TS.addList (TS.empty, xs)) + val simplifyAtomsDisj = simplifyAtomsConj o List.filter canIgnore + in + normalize (simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negateCmp, Disj) + (Combo (Conj, [markQuery fQuery, markDml fDml])) + end + + val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf + +end + +val conflictMaps = ConflictMaps.conflictMaps val rec sqexpToFormula = fn Sql.SqTrue => Combo (Conj, []) @@ -488,7 +609,7 @@ fun addChecking file = exps = exps}, dummyLoc) val (EQuery {query = queryText, ...}, _) = queryExp - (* DEBUG: we can remove the following line at some point. *) + (* DEBUG *) val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)) val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) fun bind x f = Option.mapPartial f x @@ -515,47 +636,64 @@ fun addChecking file = fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty, 0) end -fun invalidations ((query, numArgs), dml) = - let - val loc = dummyLoc - val optionAtomExpToExp = - fn NONE => (ENone stringTyp, loc) - | SOME e => (ESome (stringTyp, - (case e of - DmlRel n => ERel n - | Prim p => EPrim p - (* TODO: make new type containing only these two. 
*) - | _ => raise Match, - loc)), - loc) - fun eqsToInvalidation eqs = - let - fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) - in - inv (numArgs - 1) - end - (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here - represents unknown, which means a wider invalidation. *) - val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = - fn ([], []) => (print "hey!\n"; true) - | (NONE :: xs, _ :: ys) => madeRedundantBy (xs, ys) - | (SOME x :: xs, SOME y :: ys) => (case AtomExpKey.compare (x, y) of - EQUAL => madeRedundantBy (xs, ys) - | _ => false) - | _ => false - fun removeRedundant' (xss, yss) = - case xss of - [] => yss - | xs :: xss' => - removeRedundant' (xss', - if List.exists (fn ys => madeRedundantBy (xs, ys)) (xss' @ yss) - then yss - else xs :: yss) - fun removeRedundant xss = removeRedundant' (xss, []) - val eqss = conflictMaps (queryToFormula query, dmlToFormula dml) - in - (map (map optionAtomExpToExp) o removeRedundant o map eqsToInvalidation) eqss - end +structure Invalidations = struct + + val loc = dummyLoc + + val optionAtomExpToExp = + fn NONE => (ENone stringTyp, loc) + | SOME e => (ESome (stringTyp, + (case e of + DmlRel n => ERel n + | Prim p => EPrim p + (* TODO: make new type containing only these two. *) + | _ => raise Match, + loc)), + loc) + + fun eqsToInvalidation numArgs eqs = + let + fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) + in + inv (numArgs - 1) + end + + (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here + represents unknown, which means a wider invalidation. *) + val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = + fn ([], []) => true + | (NONE :: xs, _ :: ys) => madeRedundantBy (xs, ys) + | (SOME x :: xs, SOME y :: ys) => (case AtomExpKey.compare (x, y) of + EQUAL => madeRedundantBy (xs, ys) + | _ => false) + | _ => false + + fun removeRedundant' (xss, yss) = + case xss of + [] => yss + | xs :: xss' => + removeRedundant' (xss', + if List.exists (fn ys => madeRedundantBy (xs, ys)) (xss' @ yss) + then yss + else xs :: yss) + + fun removeRedundant xss = removeRedundant' (xss, []) + + fun eqss (query, dml) = conflictMaps (queryToFormula query, dmlToFormula dml) + + fun invalidations ((query, numArgs), dml) = + (map (map optionAtomExpToExp) + o removeRedundant + o map (eqsToInvalidation numArgs) + o eqss) + (query, dml) + +end + +val invalidations = Invalidations.invalidations + +(* DEBUG *) +val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = let @@ -567,14 +705,16 @@ fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText val dmlText = incRels numArgs newDmlText val dmlExp = EDml (dmlText, failureMode) - (* DEBUG: we can remove the following line at some point. *) + (* DEBUG *) val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty dmlText)) val invs = case Sql.parse Sql.dml dmlText of SOME dmlParsed => map (fn i => (case IM.find (indexToQueryNumArgs, i) of SOME queryNumArgs => - (i, invalidations (queryNumArgs, dmlParsed)) + (* DEBUG *) + (gunk := (queryNumArgs, dmlParsed) :: !gunk; + (i, invalidations (queryNumArgs, dmlParsed))) (* TODO: fail more gracefully. 
*) | NONE => raise Match)) (SIMM.findList (tableToIndices, tableDml dmlParsed)) @@ -585,6 +725,8 @@ fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = end | e' => e' in + (* DEBUG *) + gunk := []; fileMap doExp file end @@ -606,7 +748,7 @@ val inlineSql = fun go file = let - (* TODO: do something nicer than having Sql be in one of two modes. *) + (* TODO: do something nicer than [Sql] being in one of two modes. *) val () = (resetFfiInfo (); Sql.sqlcacheMode := true) val file' = addFlushing (addChecking (inlineSql file)) val () = Sql.sqlcacheMode := false -- cgit v1.2.3 From fdcc98562df1f37600d9b944371adcb08c3741f0 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 30 Jun 2015 01:56:22 -0700 Subject: Major DNF-calculation performance decrapification. --- caching-tests/test.ur | 2 +- src/sqlcache.sml | 41 ++++++++++++++++++++++++++--------------- 2 files changed, 27 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index ba3a337d..6721a464 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -15,7 +15,7 @@ fun cache id = fun flush id = dml (UPDATE tab SET Val = 42 - WHERE Id = {[id]} OR Id = {[id + 1]}); + WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); return Changed {[id]}! diff --git a/src/sqlcache.sml b/src/sqlcache.sml index b259f2cb..f06a9085 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -147,12 +147,12 @@ datatype 'atom formula = val flipJt = fn Conj => Disj | Disj => Conj -fun listBind xs f = List.concat (map f xs) +fun concatMap f xs = List.concat (map f xs) val rec cartesianProduct : 'a list list -> 'a list list = fn [] => [[]] - | (xs :: xss) => listBind (cartesianProduct xss) - (fn ys => listBind xs (fn x => [x :: ys])) + | (xs :: xss) => concatMap (fn ys => concatMap (fn x => [x :: ys]) xs) + (cartesianProduct xss) (* Pushes all negation to the atoms.*) fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = @@ -174,32 +174,44 @@ val rec flatten = (map flatten fs)) | f => f -fun normalize' ((simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negate) +fun normPlz (junc : junctionType) = + fn Atom x => [[x]] + | Combo (j, fs) => + let + val fss = map (normPlz junc) fs + in + if j = junc + then List.concat fss + else map List.concat (cartesianProduct fss) + end + (* Excluded by pushNegate. 
*) + | Negate _ => raise Match + +fun normalize' ((simplifyLists, simplifyAtoms, negate) : ('a list list -> 'a list list) - * ('a list -> 'a list) * ('a list -> 'a list) * ('a -> 'a)) (junc : junctionType) = let - fun simplify junc = simplifyLists o map (case junc of - Conj => simplifyAtomsConj - | Disj => simplifyAtomsDisj) + fun simplify junc = simplifyLists o map simplifyAtoms fun norm junc = simplify junc o (fn Atom x => [[x]] | Negate f => map (map negate) (norm (flipJt junc) f) | Combo (j, fs) => let - val fss = listBind fs (norm j) + val fss = map (norm junc) fs in - if j = junc then fss else cartesianProduct fss + if j = junc + then List.concat fss + else map List.concat (cartesianProduct fss) end) in norm junc end -fun normalize (simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negate, junc) = - (normalize' (simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negate) junc) +fun normalize (simplifyLists, simplifyAtoms, negate, junc) = + (normalize' (simplifyLists, simplifyAtoms, negate) junc) o flatten o pushNegate negate false @@ -414,10 +426,9 @@ structure ConflictMaps = struct | _ => false fun canIgnore (_, a1, a2) = isStar a1 orelse isStar a2 fun simplifyLists xs = TLS.listItems (TLS.addList (TLS.empty, xs)) - fun simplifyAtomsConj xs = TS.listItems (TS.addList (TS.empty, xs)) - val simplifyAtomsDisj = simplifyAtomsConj o List.filter canIgnore + fun simplifyAtoms xs = TS.listItems (TS.addList (TS.empty, xs)) in - normalize (simplifyLists, simplifyAtomsConj, simplifyAtomsDisj, negateCmp, Disj) + normalize (simplifyLists, simplifyAtoms, negateCmp, Disj) (Combo (Conj, [markQuery fQuery, markDml fDml])) end -- cgit v1.2.3 From 1622d36926c950ed34757dea78edfb4cd3463578 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 5 Jul 2015 23:57:28 -0700 Subject: Fix bug in redundancy checking and use finer formula for UPDATE statements. --- src/sqlcache.sml | 86 ++++++++++++++++++++++---------------------------------- 1 file changed, 33 insertions(+), 53 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index f06a9085..d5f6c1c0 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -174,28 +174,12 @@ val rec flatten = (map flatten fs)) | f => f -fun normPlz (junc : junctionType) = - fn Atom x => [[x]] - | Combo (j, fs) => - let - val fss = map (normPlz junc) fs - in - if j = junc - then List.concat fss - else map List.concat (cartesianProduct fss) - end - (* Excluded by pushNegate. 
*) - | Negate _ => raise Match - -fun normalize' ((simplifyLists, simplifyAtoms, negate) - : ('a list list -> 'a list list) - * ('a list -> 'a list) - * ('a -> 'a)) +fun normalize' (simplify : 'a list list -> 'a list list) + (negate : 'a -> 'a) (junc : junctionType) = let - fun simplify junc = simplifyLists o map simplifyAtoms fun norm junc = - simplify junc + simplify o (fn Atom x => [[x]] | Negate f => map (map negate) (norm (flipJt junc) f) | Combo (j, fs) => @@ -210,8 +194,8 @@ fun normalize' ((simplifyLists, simplifyAtoms, negate) norm junc end -fun normalize (simplifyLists, simplifyAtoms, negate, junc) = - (normalize' (simplifyLists, simplifyAtoms, negate) junc) +fun normalize simplify negate junc = + normalize' simplify negate junc o flatten o pushNegate negate false @@ -247,7 +231,7 @@ structure CmpKey : ORD_KEY = struct end - +(* functor ListKeyFn (K : ORD_KEY) : ORD_KEY = struct type ord_key = K.ord_key list @@ -261,6 +245,7 @@ functor ListKeyFn (K : ORD_KEY) : ORD_KEY = struct | ord => ord) end +*) functor OptionKeyFn (K : ORD_KEY) : ORD_KEY = struct @@ -294,6 +279,20 @@ val rec chooseTwos : 'a list -> ('a * 'a) list = fn [] => [] | x :: ys => map (fn y => (x, y)) ys @ chooseTwos ys +fun removeRedundant madeRedundantBy zs = + let + fun removeRedundant' (xs, ys) = + case xs of + [] => ys + | x :: xs' => + removeRedundant' (xs', + if List.exists (fn y => madeRedundantBy (x, y)) (xs' @ ys) + then ys + else x :: ys) + in + removeRedundant' (zs, []) + end + datatype atomExp = QueryArg of int | DmlRel of int @@ -329,7 +328,7 @@ structure ConflictMaps = struct structure J = OptionKeyFn(AtomExpKey) structure K = OptionKeyFn(AtomExpKey)) structure TS = BinarySetFn(TK) - structure TLS = BinarySetFn(ListKeyFn(TK)) + (* structure TLS = BinarySetFn(ListKeyFn(TK)) *) val toKnownEquality = (* [NONE] here means unkown. Anything that isn't a comparison between two @@ -365,9 +364,6 @@ structure ConflictMaps = struct (* TODO: deal with equalities between [DmlRel]s and [Prim]s. This would involve guarding the invalidation with a check for the relevant comparisons. *) - (* DEBUG: remove these print statements. *) - (* | ((DmlRel r, Prim p), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) - (* | ((Prim p, DmlRel r), eqso) => (print ("sadness " ^ Int.toString r ^ " = " ^ Prim.toString p ^ "\n"); eqso) *) | (_, eqso) => eqso val eqsOfClass : atomExp list -> atomExp IM.map option = @@ -416,20 +412,12 @@ structure ConflictMaps = struct fun dnf (fQuery, fDml) = let - val isStar = - (* TODO: decide if this is okay and, if so, factor out magic - string "*" to a common location. *) - (* First guess: definitely okay for conservative approximation, - though information lost might be useful even in current - implementation for finding an extra equality. 
*) - fn SOME (Field (_, field)) => String.isSuffix "*" field - | _ => false - fun canIgnore (_, a1, a2) = isStar a1 orelse isStar a2 - fun simplifyLists xs = TLS.listItems (TLS.addList (TLS.empty, xs)) - fun simplifyAtoms xs = TS.listItems (TS.addList (TS.empty, xs)) + val simplify = + map TS.listItems + o removeRedundant (fn (x, y) => TS.isSubset (y, x)) + o map (fn xs => TS.addList (TS.empty, xs)) in - normalize (simplifyLists, simplifyAtoms, negateCmp, Disj) - (Combo (Conj, [markQuery fQuery, markDml fDml])) + normalize simplify negateCmp Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) end val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf @@ -478,9 +466,12 @@ val rec dmlToFormula = let val fWhere = sqexpToFormula wher val fVals = valsToFormula (table, vals) + val modifiedFields = SS.addList (SS.empty, map #1 vals) (* TODO: don't use field name hack. *) val markField = - fn Sql.Field (t, v) => Sql.Field (t, v ^ "*") + fn e as Sql.Field (t, v) => if SS.member (modifiedFields, v) + then Sql.Field (t, v ^ "'") + else e | e => e val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) in @@ -673,28 +664,17 @@ structure Invalidations = struct represents unknown, which means a wider invalidation. *) val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = fn ([], []) => true - | (NONE :: xs, _ :: ys) => madeRedundantBy (xs, ys) + | (_ :: xs, NONE :: ys) => madeRedundantBy (xs, ys) | (SOME x :: xs, SOME y :: ys) => (case AtomExpKey.compare (x, y) of EQUAL => madeRedundantBy (xs, ys) | _ => false) | _ => false - fun removeRedundant' (xss, yss) = - case xss of - [] => yss - | xs :: xss' => - removeRedundant' (xss', - if List.exists (fn ys => madeRedundantBy (xs, ys)) (xss' @ yss) - then yss - else xs :: yss) - - fun removeRedundant xss = removeRedundant' (xss, []) - fun eqss (query, dml) = conflictMaps (queryToFormula query, dmlToFormula dml) fun invalidations ((query, numArgs), dml) = (map (map optionAtomExpToExp) - o removeRedundant + o removeRedundant madeRedundantBy o map (eqsToInvalidation numArgs) o eqss) (query, dml) -- cgit v1.2.3 From f9021ccf1a76dd7e570061849acdec515b5be790 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 6 Jul 2015 01:31:04 -0700 Subject: Only use string (rather than numeric, etc.) primitives in parsed SQL statements. --- caching-tests/test.ur | 2 +- src/sql.sml | 41 ++++++++++++++++++++++++++++++++--------- 2 files changed, 33 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 6721a464..510a5524 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -14,7 +14,7 @@ fun cache id = fun flush id = dml (UPDATE tab - SET Val = 42 + SET Id = 29, Val = 42 WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); return Changed {[id]}! diff --git a/src/sql.sml b/src/sql.sml index 22ffea39..959575e9 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -152,6 +152,18 @@ fun keep cp chs = end | _ => NONE +(* Used by primSqlcache. 
*) +fun optConst s chs = + case chs of + String s' :: chs => if String.isPrefix s s' then + SOME (s, if size s = size s' then + chs + else + String (String.extract (s', size s, NONE)) :: chs) + else + SOME ("", String s' :: chs) + | _ => NONE + fun ws p = wrap (follow (skip (fn ch => ch = #" ")) (follow p (skip (fn ch => ch = #" ")))) (#1 o #2) @@ -256,6 +268,23 @@ val prim = wrap (follow (opt (const "E")) (follow string (opt (const "::text")))) ((fn s => Prim.String (Prim.Normal, s)) o #1 o #2)] +val primSqlcache = + (* Like [prim], but always uses [Prim.String]s. *) + let + fun wrapS p f = wrap p ((fn s => Prim.String (Prim.Normal, s)) o f) + in + altL [wrapS (follow (wrap (follow (keep Char.isDigit) + (follow (const ".") (keep Char.isDigit))) + (fn (x, ((), y)) => x ^ "." ^ y)) + (optConst "::float8")) + op^, + wrapS (follow (keep Char.isDigit) + (optConst "::int8")) + op^, + wrapS (follow (optConst "E") (follow string (optConst "::text"))) + (fn (c1, (s, c2)) => c1 ^ s ^ c2)] +end + fun known' chs = case chs of Exp (EFfi ("Basis", "sql_known"), _) :: chs => SOME ((), chs) @@ -278,7 +307,7 @@ fun sqlify chs = fun sqlifySqlcache chs = case chs of - (* Could have variables as well as FFIs. *) + (* Could have variables or constants as well as FFIs. *) Exp (e as (ERel _, _)) :: chs => SOME (e, chs) (* If it is an FFI, match the entire expression. *) | Exp (e as (EFfiApp ("Basis", f, [(_, _)]), _)) :: chs => @@ -286,13 +315,7 @@ fun sqlifySqlcache chs = SOME (e, chs) else NONE - | Exp (ECase (e, [((PCon (_, PConFfi {mod = "Basis", con = "True", ...}, NONE), _), - (EPrim (Prim.String (Prim.Normal, "TRUE")), _)), - ((PCon (_, PConFfi {mod = "Basis", con = "False", ...}, NONE), _), - (EPrim (Prim.String (Prim.Normal, "FALSE")), _))], _), _) :: chs => - SOME (e, chs) - - | _ => NONE + | _ => sqlify chs fun constK s = wrap (const s) (fn () => s) @@ -309,7 +332,7 @@ val sqlcacheMode = ref false; fun sqexp chs = log "sqexp" - (altL [wrap prim SqConst, + (altL [wrap (if !sqlcacheMode then primSqlcache else prim) SqConst, wrap (const "TRUE") (fn () => SqTrue), wrap (const "FALSE") (fn () => SqFalse), wrap (const "NULL") (fn () => Null), -- cgit v1.2.3 From 03b7950e3639899de788cac8824a0e7f4be8a0bd Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 7 Jul 2015 00:07:24 -0700 Subject: Add limited support for parsing SQL arithmetic. --- caching-tests/test.ur | 7 +++---- src/sql.sml | 6 ++++++ src/sqlcache.sml | 14 ++++++-------- 3 files changed, 15 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 510a5524..f6568db4 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -11,11 +11,10 @@ fun cache id = | Some row => {[row.Tab.Val]}} - fun flush id = - dml (UPDATE tab - SET Id = 29, Val = 42 - WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); + dml (UPDATE tab + SET Val = Val * (Id + 2) / Val - 3 + WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); return Changed {[id]}! 
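The revised test drives both halves of this commit: the sql.sml hunk below recognizes the parenthesized, space-separated binary forms the SQL serializer appears to emit for expressions like Val * (Id + 2) and simply discards them as Unmodeled, so the SET clause is treated as opaque. Invalidation then rests on the three WHERE disjuncts alone; assuming the cached query in this test filters on Id with a single injected argument, each disjunct pins that argument to one injected value. A sketch of the invalidation keys one would expect (the datatype and the DmlRel numbering here are illustrative only, not the compiler's actual representation):

    (* Each inner list has one entry per query argument; SOME means "invalidate
       cache entries whose key equals this runtime value", while NONE would mean
       "invalidate for every value of this argument". *)
    datatype atom = DmlRel of int
    val expectedInvalidations : atom option list list =
        [[SOME (DmlRel 0)], [SOME (DmlRel 1)], [SOME (DmlRel 2)]]
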
diff --git a/src/sql.sml b/src/sql.sml index 959575e9..27894e3f 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -325,6 +325,11 @@ val funcName = altL [constK "COUNT", constK "SUM", constK "AVG"] +fun arithmetic pExp = follow (const "(") + (follow pExp + (follow (altL (map const [" + ", " - ", " * ", " / "])) + (follow pExp (const ")")))) + val unmodeled = altL [const "COUNT(*)", const "CURRENT_TIMESTAMP"] @@ -340,6 +345,7 @@ fun sqexp chs = wrap uw_ident Computed, wrap known SqKnown, wrap func SqFunc, + wrap (arithmetic sqexp) (fn _ => Unmodeled), wrap unmodeled (fn () => Unmodeled), wrap (if !sqlcacheMode then sqlifySqlcache else sqlify) Inj, wrap (follow (const "COALESCE(") (follow sqexp (follow (const ",") diff --git a/src/sqlcache.sml b/src/sqlcache.sml index d5f6c1c0..5f737ac5 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -410,15 +410,13 @@ structure ConflictMaps = struct List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) (SOME IM.empty) + val simplify = + map TS.listItems + o removeRedundant (fn (x, y) => TS.isSubset (y, x)) + o map (fn xs => TS.addList (TS.empty, xs)) + fun dnf (fQuery, fDml) = - let - val simplify = - map TS.listItems - o removeRedundant (fn (x, y) => TS.isSubset (y, x)) - o map (fn xs => TS.addList (TS.empty, xs)) - in - normalize simplify negateCmp Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) - end + normalize simplify negateCmp Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf -- cgit v1.2.3 From bc38beafd07b7ae6106a2fffda82084a08af7f06 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 19 Jul 2015 19:03:11 -0700 Subject: Rename C functions and remove functors nested inside modules. 
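A note on this commit: the functors being moved out of sqlcache.sml (OptionKeyFn and TripleKeyFn) get their own files presumably for portability, since the Definition of Standard ML only admits functor declarations at top level and nesting them inside a structure relies on an SML/NJ extension. Their behavior is unchanged: OptionKeyFn orders NONE below any SOME, and TripleKeyFn compares lexicographically. A quick sanity check, assuming both functors are in scope; IntKey here is an ad hoc structure of my own:

    structure IntKey = struct type ord_key = int val compare = Int.compare end
    structure IntOptKey = OptionKeyFn(IntKey)
    structure T = TripleKeyFn(structure I = IntKey
                              structure J = IntOptKey
                              structure K = IntOptKey)
    val a = IntOptKey.compare (NONE, SOME 3)                  (* LESS *)
    val b = T.compare ((1, SOME 2, NONE), (1, SOME 1, NONE))  (* GREATER, decided at J *)
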
--- include/urweb/types_cpp.h | 28 ++++++++--------- include/urweb/urweb_cpp.h | 6 ++-- src/c/urweb.c | 78 +++++++++++++++++++++++------------------------ src/lru_cache.sml | 12 ++++---- src/option_key_fn.sml | 11 +++++++ src/sources | 3 +- src/sqlcache.sml | 30 +----------------- src/triple_key_fn.sml | 15 +++++++++ 8 files changed, 91 insertions(+), 92 deletions(-) create mode 100644 src/option_key_fn.sml create mode 100644 src/triple_key_fn.sml (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 2f154e1f..7b9a90a4 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -123,31 +123,31 @@ typedef struct { #include "uthash.h" -typedef struct CacheValue { +typedef struct uw_sqlcache_CacheValue { char *result; char *output; -} CacheValue; +} uw_sqlcache_CacheValue; -typedef struct CacheEntry { +typedef struct uw_sqlcache_CacheEntry { char *key; void *value; time_t timeValid; - struct CacheEntry *prev; - struct CacheEntry *next; + struct uw_sqlcache_CacheEntry *prev; + struct uw_sqlcache_CacheEntry *next; UT_hash_handle hh; -} CacheEntry; +} uw_sqlcache_CacheEntry; -typedef struct CacheList { - CacheEntry *first; - CacheEntry *last; +typedef struct uw_sqlcache_CacheList { + uw_sqlcache_CacheEntry *first; + uw_sqlcache_CacheEntry *last; int size; -} CacheList; +} uw_sqlcache_CacheList; -typedef struct Cache { - CacheEntry *table; +typedef struct uw_sqlcache_Cache { + uw_sqlcache_CacheEntry *table; time_t timeInvalid; - CacheList *lru; + uw_sqlcache_CacheList *lru; int height; -} Cache; +} uw_sqlcache_Cache; #endif diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index 3ae5b69e..3fac7041 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -406,8 +406,8 @@ void uw_Basis_writec(struct uw_context *, char); #include "uthash.h" -CacheValue *check(Cache *, char **); -CacheValue *store(Cache *, char **, CacheValue *); -CacheValue *flush(Cache *, char **); +uw_sqlcache_CacheValue *uw_sqlcache_check(uw_sqlcache_Cache *, char **); +uw_sqlcache_CacheValue *uw_sqlcache_store(uw_sqlcache_Cache *, char **, uw_sqlcache_CacheValue *); +uw_sqlcache_CacheValue *uw_sqlcache_flush(uw_sqlcache_Cache *, char **); #endif diff --git a/src/c/urweb.c b/src/c/urweb.c index e0fd503c..3993448b 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4500,7 +4500,7 @@ void uw_set_remoteSock(uw_context ctx, int sock) { // Sqlcache -void listDelete(CacheList *list, CacheEntry *entry) { +void uw_sqlcache_listDelete(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *entry) { if (list->first == entry) { list->first = entry->next; } @@ -4518,7 +4518,7 @@ void listDelete(CacheList *list, CacheEntry *entry) { --(list->size); } -void listAdd(CacheList *list, CacheEntry *entry) { +void uw_sqlcache_listAdd(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *entry) { if (list->last) { list->last->next = entry; entry->prev = list->last; @@ -4530,22 +4530,22 @@ void listAdd(CacheList *list, CacheEntry *entry) { ++(list->size); } -void listBump(CacheList *list, CacheEntry *entry) { - listDelete(list, entry); - listAdd(list, entry); +void uw_sqlcache_listBump(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *entry) { + uw_sqlcache_listDelete(list, entry); + uw_sqlcache_listAdd(list, entry); } // TODO: deal with time properly. -time_t getTimeNow() { +time_t uw_sqlcache_getTimeNow() { return time(NULL); } -time_t timeMax(time_t x, time_t y) { +time_t uw_sqlcache_timeMax(time_t x, time_t y) { return difftime(x, y) > 0 ? 
x : y; } -void freeCacheValue(CacheValue *value) { +void uw_sqlcache_freeuw_sqlcache_CacheValue(uw_sqlcache_CacheValue *value) { if (value) { free(value->result); free(value->output); @@ -4553,83 +4553,83 @@ void freeCacheValue(CacheValue *value) { } } -void delete(Cache *cache, CacheEntry* entry) { - //listDelete(cache->lru, entry); +void uw_sqlcache_delete(uw_sqlcache_Cache *cache, uw_sqlcache_CacheEntry* entry) { + //uw_sqlcache_listUw_Sqlcache_Delete(cache->lru, entry); HASH_DELETE(hh, cache->table, entry); - freeCacheValue(entry->value); + uw_sqlcache_freeuw_sqlcache_CacheValue(entry->value); free(entry->key); free(entry); } -CacheValue *checkHelper(Cache *cache, char **keys, int timeInvalid) { +uw_sqlcache_CacheValue *uw_sqlcache_checkHelper(uw_sqlcache_Cache *cache, char **keys, int timeInvalid) { char *key = keys[cache->height]; - CacheEntry *entry; + uw_sqlcache_CacheEntry *entry; HASH_FIND(hh, cache->table, key, strlen(key), entry); - timeInvalid = timeMax(timeInvalid, cache->timeInvalid); + timeInvalid = uw_sqlcache_timeMax(timeInvalid, cache->timeInvalid); if (entry && difftime(entry->timeValid, timeInvalid) > 0) { if (cache->height == 0) { // At height 0, entry->value is the desired value. - //listBump(cache->lru, entry); + //uw_sqlcache_listBump(cache->lru, entry); return entry->value; } else { // At height n+1, entry->value is a pointer to a cache at heignt n. - return checkHelper(entry->value, keys, timeInvalid); + return uw_sqlcache_checkHelper(entry->value, keys, timeInvalid); } } else { return NULL; } } -CacheValue *check(Cache *cache, char **keys) { - return checkHelper(cache, keys, 0); +uw_sqlcache_CacheValue *uw_sqlcache_check(uw_sqlcache_Cache *cache, char **keys) { + return uw_sqlcache_checkHelper(cache, keys, 0); } -void storeHelper(Cache *cache, char **keys, CacheValue *value, int timeNow) { - CacheEntry *entry; +void uw_sqlcache_storeHelper(uw_sqlcache_Cache *cache, char **keys, uw_sqlcache_CacheValue *value, int timeNow) { + uw_sqlcache_CacheEntry *entry; char *key = keys[cache->height]; HASH_FIND(hh, cache->table, key, strlen(key), entry); if (!entry) { - entry = malloc(sizeof(CacheEntry)); + entry = malloc(sizeof(uw_sqlcache_CacheEntry)); entry->key = strdup(key); entry->value = NULL; HASH_ADD_KEYPTR(hh, cache->table, entry->key, strlen(entry->key), entry); } entry->timeValid = timeNow; if (cache->height == 0) { - //listAdd(cache->lru, entry); - freeCacheValue(entry->value); + //uw_sqlcache_listAdd(cache->lru, entry); + uw_sqlcache_freeuw_sqlcache_CacheValue(entry->value); entry->value = value; //if (cache->lru->size > MAX_SIZE) { - //delete(cache, cache->lru->first); + //uw_sqlcache_delete(cache, cache->lru->first); // TODO: return flushed value. 
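Stepping back from the renames for a moment: uw_Sqlcache_checkHelper above descends a tree of hash tables, consuming one key per level, taking the maximum timeInvalid seen along the path, and returning a hit only when the entry's timeValid is strictly newer than that maximum, which is what lets a whole subtree be invalidated just by bumping its timeInvalid. A small Standard ML model of that lookup, for illustration only (the datatype and function are mine, not part of the runtime):

    datatype 'a cache =
             Cache of {timeInvalid : int,
                       entries : (string * {timeValid : int, content : 'a content}) list}
         and 'a content = Value of 'a | Sub of 'a cache

    fun check (Cache {timeInvalid, entries}) keys tInv =
        case keys of
            [] => NONE
          | key :: keys' =>
            let
                val tInv = Int.max (tInv, timeInvalid)
            in
                case List.find (fn (k, _) => k = key) entries of
                    NONE => NONE
                  | SOME (_, {timeValid, content}) =>
                    if timeValid <= tInv then NONE else
                    case (content, keys') of
                        (Value v, []) => SOME v
                      | (Sub sub, _ :: _) => check sub keys' tInv
                      | _ => NONE (* key list and cache height disagree *)
            end
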
//} } else { if (!entry->value) { - Cache *newCache = malloc(sizeof(Cache)); - newCache->table = NULL; - newCache->timeInvalid = timeNow; - newCache->lru = cache->lru; - newCache->height = cache->height - 1; - entry->value = newCache; + uw_sqlcache_Cache *newuw_sqlcache_Cache = malloc(sizeof(uw_sqlcache_Cache)); + newuw_sqlcache_Cache->table = NULL; + newuw_sqlcache_Cache->timeInvalid = timeNow; + newuw_sqlcache_Cache->lru = cache->lru; + newuw_sqlcache_Cache->height = cache->height - 1; + entry->value = newuw_sqlcache_Cache; } - storeHelper(entry->value, keys, value, timeNow); + uw_sqlcache_storeHelper(entry->value, keys, value, timeNow); } } -void store(Cache *cache, char **keys, CacheValue *value) { - storeHelper(cache, keys, value, getTimeNow()); +void uw_sqlcache_store(uw_sqlcache_Cache *cache, char **keys, uw_sqlcache_CacheValue *value) { + uw_sqlcache_storeHelper(cache, keys, value, uw_sqlcache_getTimeNow()); } -void flushHelper(Cache *cache, char **keys, int timeNow) { - CacheEntry *entry; +void uw_sqlcache_flushHelper(uw_sqlcache_Cache *cache, char **keys, int timeNow) { + uw_sqlcache_CacheEntry *entry; char *key = keys[cache->height]; if (key) { HASH_FIND(hh, cache->table, key, strlen(key), entry); if (entry) { if (cache->height == 0) { - delete(cache, entry); + uw_sqlcache_delete(cache, entry); } else { - flushHelper(entry->value, keys, timeNow); + uw_sqlcache_flushHelper(entry->value, keys, timeNow); } } } else { @@ -4638,6 +4638,6 @@ void flushHelper(Cache *cache, char **keys, int timeNow) { } } -void flush(Cache *cache, char **keys) { - flushHelper(cache, keys, getTimeNow()); +void uw_sqlcache_flush(uw_sqlcache_Cache *cache, char **keys) { + uw_sqlcache_flushHelper(cache, keys, uw_sqlcache_getTimeNow()); } diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 87e939fa..26590312 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -64,7 +64,7 @@ fun setupQuery {index, params} = in Print.box - [string ("static Cache cacheStruct" ^ i ^ " = {"), + [string ("static uw_sqlcache_Cache cacheStruct" ^ i ^ " = {"), newline, string " .table = NULL,", newline, @@ -74,7 +74,7 @@ fun setupQuery {index, params} = newline, string (" .height = " ^ Int.toString (params - 1) ^ "};"), newline, - string ("static Cache *cache" ^ i ^ " = &cacheStruct" ^ i ^ ";"), + string ("static uw_sqlcache_Cache *cache" ^ i ^ " = &cacheStruct" ^ i ^ ";"), newline, newline, @@ -83,7 +83,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" CacheValue *v = check(cache" ^ i ^ ", ks);"), + string (" uw_sqlcache_CacheValue *v = uw_sqlcache_check(cache" ^ i ^ ", ks);"), newline, string " if (v) {", newline, @@ -112,7 +112,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" CacheValue *v = malloc(sizeof(CacheValue));"), + string (" uw_sqlcache_CacheValue *v = malloc(sizeof(uw_sqlcache_CacheValue));"), newline, string " v->result = strdup(s);", newline, @@ -120,7 +120,7 @@ fun setupQuery {index, params} = newline, string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), newline, - string (" store(cache" ^ i ^ ", ks, v);"), + string (" uw_sqlcache_store(cache" ^ i ^ ", ks, v);"), newline, string " return uw_unit_v;", newline, @@ -133,7 +133,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" flush(cache" ^ i ^ ", ks);"), + string (" uw_sqlcache_flush(cache" ^ i ^ ", ks);"), newline, string " return uw_unit_v;", newline, diff --git 
a/src/option_key_fn.sml b/src/option_key_fn.sml new file mode 100644 index 00000000..ba636d4e --- /dev/null +++ b/src/option_key_fn.sml @@ -0,0 +1,11 @@ +functor OptionKeyFn(K : ORD_KEY) : ORD_KEY = struct + +type ord_key = K.ord_key option + +val compare = + fn (NONE, NONE) => EQUAL + | (NONE, _) => LESS + | (_, NONE) => GREATER + | (SOME x, SOME y) => K.compare (x, y) + +end diff --git a/src/sources b/src/sources index 0608d710..f0914bdf 100644 --- a/src/sources +++ b/src/sources @@ -172,8 +172,9 @@ $(SRC)/sql.sig $(SRC)/sql.sml $(SRC)/union_find_fn.sml - $(SRC)/multimap_fn.sml +$(SRC)/option_key_fn.sml +$(SRC)/triple_key_fn.sml $(SRC)/cache.sml $(SRC)/toy_cache.sml diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 5f737ac5..ff58ef77 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -207,7 +207,7 @@ fun mapFormula mf = (* SQL analysis. *) -structure CmpKey : ORD_KEY = struct +structure CmpKey = struct type ord_key = Sql.cmp @@ -247,34 +247,6 @@ functor ListKeyFn (K : ORD_KEY) : ORD_KEY = struct end *) -functor OptionKeyFn (K : ORD_KEY) : ORD_KEY = struct - - type ord_key = K.ord_key option - - val compare = - fn (NONE, NONE) => EQUAL - | (NONE, _) => LESS - | (_, NONE) => GREATER - | (SOME x, SOME y) => K.compare (x, y) - -end - -functor TripleKeyFn (structure I : ORD_KEY - structure J : ORD_KEY - structure K : ORD_KEY) - : ORD_KEY where type ord_key = I.ord_key * J.ord_key * K.ord_key = struct - - type ord_key = I.ord_key * J.ord_key * K.ord_key - - fun compare ((i1, j1, k1), (i2, j2, k2)) = - case I.compare (i1, i2) of - EQUAL => (case J.compare (j1, j2) of - EQUAL => K.compare (k1, k2) - | ord => ord) - | ord => ord - -end - val rec chooseTwos : 'a list -> ('a * 'a) list = fn [] => [] | x :: ys => map (fn y => (x, y)) ys @ chooseTwos ys diff --git a/src/triple_key_fn.sml b/src/triple_key_fn.sml new file mode 100644 index 00000000..ba77c60b --- /dev/null +++ b/src/triple_key_fn.sml @@ -0,0 +1,15 @@ +functor TripleKeyFn (structure I : ORD_KEY + structure J : ORD_KEY + structure K : ORD_KEY) + : ORD_KEY where type ord_key = I.ord_key * J.ord_key * K.ord_key = struct + +type ord_key = I.ord_key * J.ord_key * K.ord_key + +fun compare ((i1, j1, k1), (i2, j2, k2)) = + case I.compare (i1, i2) of + EQUAL => (case J.compare (j1, j2) of + EQUAL => K.compare (k1, k2) + | ord => ord) + | ord => ord + +end -- cgit v1.2.3 From 0cfbe4639f076d50f2a3bbc9e6f566a452a43167 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 20 Jul 2015 19:49:13 -0700 Subject: Fix possible formula simplification bug with extra formula' type. --- src/sqlcache.sml | 58 ++++++++++++++++++++++---------------------------------- 1 file changed, 23 insertions(+), 35 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index ff58ef77..fae8b5f3 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -145,6 +145,11 @@ datatype 'atom formula = | Negate of 'atom formula | Combo of junctionType * 'atom formula list +(* Guaranteed to have all negation pushed to the atoms. 
*) +datatype 'atom formula' = + Atom' of 'atom + | Combo' of junctionType * 'atom formula' list + val flipJt = fn Conj => Disj | Disj => Conj fun concatMap f xs = List.concat (map f xs) @@ -156,33 +161,33 @@ val rec cartesianProduct : 'a list list -> 'a list list = (* Pushes all negation to the atoms.*) fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = - fn Atom x => Atom (if negating then negate x else x) + fn Atom x => Atom' (if negating then negate x else x) | Negate f => pushNegate negate (not negating) f - | Combo (n, fs) => Combo (if negating then flipJt n else n, map (pushNegate negate negating) fs) + | Combo (j, fs) => Combo' (if negating then flipJt j else j, map (pushNegate negate negating) fs) val rec flatten = - fn Combo (_, [f]) => flatten f - | Combo (j, fs) => - Combo (j, List.foldr (fn (f, acc) => - case f of - Combo (j', fs') => - if j = j' orelse length fs' = 1 - then fs' @ acc - else f :: acc - | _ => f :: acc) - [] - (map flatten fs)) + fn Combo' (_, [f]) => flatten f + | Combo' (j, fs) => + Combo' (j, List.foldr (fn (f, acc) => + case f of + Combo' (j', fs') => + if j = j' orelse length fs' = 1 + then fs' @ acc + else f :: acc + | _ => f :: acc) + [] + (map flatten fs)) | f => f +(* [simplify] operates on the desired normal form. E.g., if [junc] is [Disj], + consider the list of lists to be a disjunction of conjunctions. *) fun normalize' (simplify : 'a list list -> 'a list list) - (negate : 'a -> 'a) (junc : junctionType) = let fun norm junc = simplify - o (fn Atom x => [[x]] - | Negate f => map (map negate) (norm (flipJt junc) f) - | Combo (j, fs) => + o (fn Atom' x => [[x]] + | Combo' (j, fs) => let val fss = map (norm junc) fs in @@ -195,7 +200,7 @@ fun normalize' (simplify : 'a list list -> 'a list list) end fun normalize simplify negate junc = - normalize' simplify negate junc + normalize' simplify junc o flatten o pushNegate negate false @@ -231,22 +236,6 @@ structure CmpKey = struct end -(* -functor ListKeyFn (K : ORD_KEY) : ORD_KEY = struct - - type ord_key = K.ord_key list - - val rec compare = - fn ([], []) => EQUAL - | ([], _) => LESS - | (_, []) => GREATER - | (x :: xs, y :: ys) => (case K.compare (x, y) of - EQUAL => compare (xs, ys) - | ord => ord) - -end -*) - val rec chooseTwos : 'a list -> ('a * 'a) list = fn [] => [] | x :: ys => map (fn y => (x, y)) ys @ chooseTwos ys @@ -300,7 +289,6 @@ structure ConflictMaps = struct structure J = OptionKeyFn(AtomExpKey) structure K = OptionKeyFn(AtomExpKey)) structure TS = BinarySetFn(TK) - (* structure TLS = BinarySetFn(ListKeyFn(TK)) *) val toKnownEquality = (* [NONE] here means unkown. Anything that isn't a comparison between two -- cgit v1.2.3 From 4ff7cf9503b917dcc8db1de3ba03b513240f7dc8 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 20 Jul 2015 23:25:44 -0700 Subject: Use uniform representation of comparisons for better simplification. 
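The idea of this commit, visible in normalizeAtom below: every comparison atom is put into a canonical form while negation is being pushed inward, so negation flips the operator, Gt and Ge are rewritten as Lt and Le with the operands swapped, and the operands of Eq and Ne are sorted. Syntactically different spellings of the same constraint then collide as equal elements of the sets used for simplification. A toy model (the types are mine, with string operands and an arbitrary but fixed operand order; the real code orders by AtomOptionKey.compare, and only consistency matters):

    datatype cmp = Eq | Ne | Lt | Le | Gt | Ge

    val negateCmp =
        fn Eq => Ne | Ne => Eq | Lt => Ge | Le => Gt | Gt => Le | Ge => Lt

    fun normalizeAtom negating (c, e1 : string, e2) =
        case (if negating then negateCmp c else c) of
            Eq => if e2 < e1 then (Eq, e2, e1) else (Eq, e1, e2)
          | Ne => if e2 < e1 then (Ne, e2, e1) else (Ne, e1, e2)
          | Lt => (Lt, e1, e2)
          | Le => (Le, e1, e2)
          | Gt => (Lt, e2, e1)
          | Ge => (Le, e2, e1)

    (* "x > y" and "NOT (y >= x)" land on the same canonical atom: *)
    val sameAtom =
        normalizeAtom false (Gt, "x", "y") = normalizeAtom true (Ge, "y", "x")
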
--- src/option_key_fn.sml | 3 ++- src/sqlcache.sml | 56 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 38 insertions(+), 21 deletions(-) (limited to 'src') diff --git a/src/option_key_fn.sml b/src/option_key_fn.sml index ba636d4e..27ba9138 100644 --- a/src/option_key_fn.sml +++ b/src/option_key_fn.sml @@ -1,4 +1,5 @@ -functor OptionKeyFn(K : ORD_KEY) : ORD_KEY = struct +functor OptionKeyFn(K : ORD_KEY) + : ORD_KEY where type ord_key = K.ord_key option = struct type ord_key = K.ord_key option diff --git a/src/sqlcache.sml b/src/sqlcache.sml index fae8b5f3..a59f8b55 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -160,10 +160,11 @@ val rec cartesianProduct : 'a list list -> 'a list list = (cartesianProduct xss) (* Pushes all negation to the atoms.*) -fun pushNegate (negate : 'atom -> 'atom) (negating : bool) = - fn Atom x => Atom' (if negating then negate x else x) - | Negate f => pushNegate negate (not negating) f - | Combo (j, fs) => Combo' (if negating then flipJt j else j, map (pushNegate negate negating) fs) +fun pushNegate (normalizeAtom : bool * 'atom -> 'atom) (negating : bool) = + fn Atom x => Atom' (normalizeAtom (negating, x)) + | Negate f => pushNegate normalizeAtom (not negating) f + | Combo (j, fs) => Combo' (if negating then flipJt j else j, + map (pushNegate normalizeAtom negating) fs) val rec flatten = fn Combo' (_, [f]) => flatten f @@ -199,10 +200,10 @@ fun normalize' (simplify : 'a list list -> 'a list list) norm junc end -fun normalize simplify negate junc = +fun normalize simplify normalizeAtom junc = normalize' simplify junc o flatten - o pushNegate negate false + o pushNegate normalizeAtom false fun mapFormula mf = fn Atom x => Atom (mf x) @@ -281,14 +282,16 @@ structure AtomExpKey : ORD_KEY = struct end +structure AtomOptionKey = OptionKeyFn(AtomExpKey) + structure UF = UnionFindFn(AtomExpKey) structure ConflictMaps = struct structure TK = TripleKeyFn(structure I = CmpKey - structure J = OptionKeyFn(AtomExpKey) - structure K = OptionKeyFn(AtomExpKey)) - structure TS = BinarySetFn(TK) + structure J = AtomOptionKey + structure K = AtomOptionKey) + structure TS : ORD_SET = BinarySetFn(TK) val toKnownEquality = (* [NONE] here means unkown. Anything that isn't a comparison between two @@ -345,15 +348,28 @@ structure ConflictMaps = struct (cmp, qa e1, qa e2) end - fun negateCmp (cmp, e1, e2) = - (case cmp of - Sql.Eq => Sql.Ne - | Sql.Ne => Sql.Eq - | Sql.Lt => Sql.Ge - | Sql.Le => Sql.Gt - | Sql.Gt => Sql.Le - | Sql.Ge => Sql.Lt, - e1, e2) + val negateCmp = + fn Sql.Eq => Sql.Ne + | Sql.Ne => Sql.Eq + | Sql.Lt => Sql.Ge + | Sql.Le => Sql.Gt + | Sql.Gt => Sql.Le + | Sql.Ge => Sql.Lt + + fun normalizeAtom (negating, (cmp, e1, e2)) = + (* Restricting to Le/Lt and sorting the expressions in Eq/Ne helps with + simplification, where we put the triples in sets. 
*) + case (if negating then negateCmp cmp else cmp) of + Sql.Eq => (case AtomOptionKey.compare (e1, e2) of + LESS => (Sql.Eq, e2, e1) + | _ => (Sql.Eq, e1, e2)) + | Sql.Ne => (case AtomOptionKey.compare (e1, e2) of + LESS => (Sql.Ne, e2, e1) + | _ => (Sql.Ne, e1, e2)) + | Sql.Lt => (Sql.Lt, e1, e2) + | Sql.Le => (Sql.Le, e1, e2) + | Sql.Gt => (Sql.Lt, e2, e1) + | Sql.Ge => (Sql.Le, e2, e1) val markQuery : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> (Sql.cmp * atomExp option * atomExp option) formula = @@ -376,7 +392,7 @@ structure ConflictMaps = struct o map (fn xs => TS.addList (TS.empty, xs)) fun dnf (fQuery, fDml) = - normalize simplify negateCmp Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) + normalize simplify normalizeAtom Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf @@ -435,7 +451,7 @@ val rec dmlToFormula = in renameTables [(table, "T")] (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), - Combo (Conj, [mark fVals, fWhere])])) + Combo (Conj, [mark fVals, fWhere])])) end val rec tablesQuery = -- cgit v1.2.3 From 68e7308878bd4baac189c9e635c66b194adb9a0e Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 2 Aug 2015 18:25:42 -0700 Subject: Sqlcache allows any expression injected into SQL. --- src/sql.sml | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/sql.sml b/src/sql.sml index 27894e3f..a04409a0 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -305,17 +305,11 @@ fun sqlify chs = | _ => NONE -fun sqlifySqlcache chs = - case chs of - (* Could have variables or constants as well as FFIs. *) - Exp (e as (ERel _, _)) :: chs => SOME (e, chs) - (* If it is an FFI, match the entire expression. *) - | Exp (e as (EFfiApp ("Basis", f, [(_, _)]), _)) :: chs => - if String.isPrefix "sqlify" f then - SOME (e, chs) - else - NONE - | _ => sqlify chs +(* For sqlcache, we only care that we can do string equality on injected Mono + expressions, so accept any expression without modifying it. *) +val sqlifySqlcache = + fn Exp e :: chs => SOME (e, chs) + | _ => None fun constK s = wrap (const s) (fn () => s) -- cgit v1.2.3 From 26333a65b7b9ec4b68d76e17d43beb64145c728e Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 2 Aug 2015 18:37:24 -0700 Subject: Fix NONE capitalization typo. --- src/sql.sml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/sql.sml b/src/sql.sml index a04409a0..da0143b7 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -309,7 +309,7 @@ fun sqlify chs = expressions, so accept any expression without modifying it. *) val sqlifySqlcache = fn Exp e :: chs => SOME (e, chs) - | _ => None + | _ => NONE fun constK s = wrap (const s) (fn () => s) -- cgit v1.2.3 From 5c4c302aea71f47679e8d8b4197f869355b2180a Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sat, 15 Aug 2015 23:08:37 -0700 Subject: Rewrite effectfulness analysis using MonoUtil. --- src/sqlcache.sml | 193 +++++++++++++++++++++++++++---------------------------- 1 file changed, 94 insertions(+), 99 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index a59f8b55..8fae15eb 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -43,100 +43,82 @@ val cache = ref LruCache.cache fun setCache c = cache := c fun getCache () = !cache +(* Used to have type context for local variables in MonoUtil functions. *) +val doBind = + fn (ctx, MonoUtil.Exp.RelE (_, t)) => t :: ctx + | (ctx, _) => ctx -(* Effect analysis. 
*) + +(*******************) +(* Effect Analysis *) +(*******************) (* Makes an exception for [EWrite] (which is recorded when caching). *) -fun effectful doPrint (effs : IS.set) (inFunction : bool) (bound : int) : exp -> bool = - (* If result is true, expression is definitely effectful. If result is - false, then expression is definitely not effectful if effs is fully - populated. The intended pattern is to use this a number of times equal - to the number of declarations in a file, Bellman-Ford style. *) - (* TODO: make incrementing of the number of bound variables cleaner, - probably by using [MonoUtil] instead of all this. *) +fun effectful (effs : IS.set) = let - (* DEBUG: remove printing when done. *) - fun tru msg = if doPrint then (print (msg ^ "\n"); true) else true - val rec eff' = - (* ASK: is there a better way? *) - fn EPrim _ => false - (* We don't know if local functions have effects when applied. *) - | ERel idx => if inFunction andalso idx >= bound - then tru ("rel" ^ Int.toString idx) else false - | ENamed name => if IS.member (effs, name) then tru "named" else false - | ECon (_, _, NONE) => false - | ECon (_, _, SOME e) => eff e - | ENone _ => false - | ESome (_, e) => eff e - | EFfi (m, f) => if ffiEffectful (m, f) then tru "ffi" else false - | EFfiApp (m, f, _) => if ffiEffectful (m, f) then tru "ffiapp" else false - (* ASK: we're calling functions effectful if they have effects when - applied or if the function expressions themselves have effects. - Is that okay? *) - (* This is okay because the values we ultimately care about aren't - functions, and this is a conservative approximation, anyway. *) - | EApp (eFun, eArg) => effectful doPrint effs true bound eFun orelse eff eArg - | EAbs (_, _, _, e) => effectful doPrint effs inFunction (bound+1) e - | EUnop (_, e) => eff e - | EBinop (_, _, e1, e2) => eff e1 orelse eff e2 - | ERecord xs => List.exists (fn (_, e, _) => eff e) xs - | EField (e, _) => eff e - (* If any case could be effectful, consider it effectful. *) - | ECase (e, xs, _) => eff e orelse List.exists (fn (_, e) => eff e) xs - | EStrcat (e1, e2) => eff e1 orelse eff e2 - (* ASK: how should we treat these three? *) - | EError _ => tru "error" - | EReturnBlob _ => tru "blob" - | ERedirect _ => tru "redirect" - (* EWrite is a special exception because we record writes when caching. *) - | EWrite _ => false - | ESeq (e1, e2) => eff e1 orelse eff e2 - (* TODO: keep context of which local variables aren't effectful? Only - makes a difference for function expressions, though. *) - | ELet (_, _, eBind, eBody) => eff eBind orelse - effectful doPrint effs inFunction (bound+1) eBody - | EClosure (_, es) => List.exists eff es - (* TODO: deal with EQuery. *) - | EQuery _ => tru "query" - | EDml _ => tru "dml" - | ENextval _ => tru "nextval" - | ESetval _ => tru "setval" - | EUnurlify (e, _, _) => eff e - (* ASK: how should we treat this? *) - | EJavaScript _ => tru "javascript" - (* ASK: these are all effectful, right? *) - | ESignalReturn _ => tru "signalreturn" - | ESignalBind _ => tru "signalbind" - | ESignalSource _ => tru "signalsource" - | EServerCall _ => tru "servercall" - | ERecv _ => tru "recv" - | ESleep _ => tru "sleep" - | ESpawn _ => tru "spawn" - and eff = fn (e', _) => eff' e' + val isFunction = + fn (TFun _, _) => true + | _ => false + fun doExp (ctx, e) = + case e of + EPrim _ => false + (* For now: variables of function type might be effectful, but + others are fully evaluated and are therefore not effectful. 
*) + | ERel n => isFunction (List.nth (ctx, n)) + | ENamed n => IS.member (effs, n) + | EFfi (m, f) => ffiEffectful (m, f) + | EFfiApp (m, f, _) => ffiEffectful (m, f) + (* These aren't effectful unless a subexpression is. *) + | ECon _ => false + | ENone _ => false + | ESome _ => false + | EApp _ => false + | EAbs _ => false + | EUnop _ => false + | EBinop _ => false + | ERecord _ => false + | EField _ => false + | ECase _ => false + | EStrcat _ => false + (* EWrite is a special exception because we record writes when caching. *) + | EWrite _ => false + | ESeq _ => false + | ELet _ => false + (* ASK: what should we do about closures? *) + | EClosure _ => false + | EUnurlify _ => false + (* Everything else is some sort of effect. We could flip this and + explicitly list bits of Mono that are effectful, but this is + conservatively robust to future changes (however unlikely). *) + | _ => true in - eff + MonoUtil.Exp.existsB {typ = fn _ => false, exp = doExp, bind = doBind} end (* TODO: test this. *) -val effectfulMap = +fun effectfulDecls (decls, _) = let - fun doVal ((_, name, _, e, _), effMap) = - if effectful false effMap false 0 e - then IS.add (effMap, name) - else effMap + fun doVal ((_, name, _, e, _), effs) = + if effectful effs [] e + then IS.add (effs, name) + else effs val doDecl = - fn (DVal v, effMap) => doVal (v, effMap) - (* Repeat the list of declarations a number of times equal to its size. *) - | (DValRec vs, effMap) => - List.foldl doVal effMap (List.concat (List.map (fn _ => vs) vs)) + fn ((DVal v, _), effs) => doVal (v, effs) + (* Repeat the list of declarations a number of times equal to its size, + making sure effectfulness propagates everywhere it should. This is + analagous to the Bellman-Ford algorithm. *) + | ((DValRec vs, _), effs) => + List.foldl doVal effs (List.concat (List.map (fn _ => vs) vs)) (* ASK: any other cases? *) - | (_, effMap) => effMap + | (_, effs) => effs in - MonoUtil.File.fold {typ = #2, exp = #2, decl = doDecl} IS.empty + List.foldl doDecl IS.empty decls end -(* Boolean formula normalization. *) +(*********************************) +(* Boolean Formula Normalization *) +(*********************************) datatype junctionType = Conj | Disj @@ -211,7 +193,9 @@ fun mapFormula mf = | Combo (j, fs) => Combo (j, map (mapFormula mf) fs) -(* SQL analysis. *) +(****************) +(* SQL Analysis *) +(****************) structure CmpKey = struct @@ -464,7 +448,9 @@ val tableDml = | Sql.Update (tab, _, _) => tab -(* Program instrumentation. *) +(***************************) +(* Program Instrumentation *) +(***************************) val varName = let @@ -477,6 +463,8 @@ val {check, store, flush, ...} = getCache () val dummyLoc = ErrorMsg.dummySpan +val dummyTyp = (TRecord [], dummyLoc) + fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), dummyLoc) val stringTyp = (TFfi ("Basis", "string"), dummyLoc) @@ -490,17 +478,15 @@ val sequence = end | _ => raise Match -(* Always increments negative indices because that's what we need later. *) -fun incRelsBound bound inc = +(* Always increments negative indices as a hack we use later. 
*) +fun incRels inc = MonoUtil.Exp.mapB - {typ = fn x => x, - exp = fn level => - (fn ERel n => ERel (if n >= level orelse n < 0 then n + inc else n) - | x => x), - bind = fn (level, MonoUtil.Exp.RelE _) => level + 1 | (level, _) => level} - bound - -val incRels = incRelsBound 0 + {typ = fn t' => t', + exp = fn bound => + (fn ERel n => ERel (if n >= bound orelse n < 0 then n + inc else n) + | e' => e'), + bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} + 0 fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = let @@ -523,13 +509,16 @@ fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = end fun fileMapfold doExp file start = - case MonoUtil.File.mapfold {typ = Search.return2, - exp = fn x => (fn s => Search.Continue (doExp x s)), - decl = Search.return2} file start of + case MonoUtil.File.mapfoldB + {typ = Search.return2, + exp = fn ctx => fn e' => fn s => Search.Continue (doExp ctx e' s), + decl = fn _ => Search.return2, + bind = doBind} + [] file start of Search.Continue x => x | Search.Return _ => raise Match -fun fileMap doExp file = #1 (fileMapfold (fn x => fn _ => (doExp x, ())) file ()) +fun fileMap doExp file = #1 (fileMapfold (fn _ => fn e => fn _ => (doExp e, ())) file ()) fun factorOutNontrivial text = let @@ -567,7 +556,7 @@ fun factorOutNontrivial text = fun addChecking file = let - fun doExp (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = + fun doExp ctx (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = fn e' as EQuery {query = origQueryText, sqlcacheInfo = urlifiedRel0, state = resultTyp, @@ -590,8 +579,12 @@ fun addChecking file = val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE - (* DEBUG: set first boolean argument to true to turn on printing. *) - fun safe bound = not o effectful true (effectfulMap file) false bound + val effs = effectfulDecls file + (* We use dummyTyp here. I think this is okay because databases + don't store (effectful) functions, but there could be some + corner case I missed. *) + fun safe bound = + not o effectful effs (List.tabulate (bound, fn _ => dummyTyp) @ ctx) val attempt = (* Ziv misses Haskell's do notation.... *) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( @@ -609,7 +602,9 @@ fun addChecking file = end | e' => (e', queryInfo) in - fileMapfold (fn exp => fn state => doExp state exp) file (SIMM.empty, IM.empty, 0) + fileMapfold (fn ctx => fn exp => fn state => doExp ctx state exp) + file + (SIMM.empty, IM.empty, 0) end structure Invalidations = struct -- cgit v1.2.3 From 46fe4e62ddefd8f79f4a29f7a273f585436d3c85 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 13 Sep 2015 16:02:45 -0400 Subject: Start work on pure expression caching. 
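Before the renames below, a recap of the fixed point from the previous commit: effectfulDecls folds doVal over each mutually recursive group once per member, so a definition is marked effectful as soon as anything it references already is, and repeating the pass as many times as there are definitions guarantees the property propagates along any chain of calls, in the Bellman-Ford style the code comment mentions. A toy model over a made-up call graph (names are integers here; the real pass walks Mono declarations and uses the effectful predicate instead of an explicit callee list):

    structure IS = IntBinarySet  (* from the SML/NJ library *)

    (* 1 calls 2, 2 calls 3, 3 is directly effectful, 4 is pure. *)
    val calls = [(1, [2]), (2, [3]), (3, []), (4, [])]
    val directlyEffectful = IS.singleton 3

    fun relax effs =
        List.foldl (fn ((name, callees), effs) =>
                       if List.exists (fn n => IS.member (effs, n)) callees
                       then IS.add (effs, name)
                       else effs)
                   effs calls

    (* One relaxation pass per declaration suffices. *)
    val effs = List.foldl (fn (_, effs) => relax effs) directlyEffectful calls
    val result = IS.listItems effs  (* [1, 2, 3] *)
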
--- include/urweb/types_cpp.h | 28 ++++---- include/urweb/urweb_cpp.h | 6 +- src/c/openssl.c | 4 +- src/c/urweb.c | 78 ++++++++++----------- src/lru_cache.sml | 12 ++-- src/sqlcache.sml | 174 +++++++++++++++++++++++++++++++++++++++++----- 6 files changed, 221 insertions(+), 81 deletions(-) (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 7b9a90a4..84423105 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -123,31 +123,31 @@ typedef struct { #include "uthash.h" -typedef struct uw_sqlcache_CacheValue { +typedef struct uw_Sqlcache_CacheValue { char *result; char *output; -} uw_sqlcache_CacheValue; +} uw_Sqlcache_CacheValue; -typedef struct uw_sqlcache_CacheEntry { +typedef struct uw_Sqlcache_CacheEntry { char *key; void *value; time_t timeValid; - struct uw_sqlcache_CacheEntry *prev; - struct uw_sqlcache_CacheEntry *next; + struct uw_Sqlcache_CacheEntry *prev; + struct uw_Sqlcache_CacheEntry *next; UT_hash_handle hh; -} uw_sqlcache_CacheEntry; +} uw_Sqlcache_CacheEntry; -typedef struct uw_sqlcache_CacheList { - uw_sqlcache_CacheEntry *first; - uw_sqlcache_CacheEntry *last; +typedef struct uw_Sqlcache_CacheList { + uw_Sqlcache_CacheEntry *first; + uw_Sqlcache_CacheEntry *last; int size; -} uw_sqlcache_CacheList; +} uw_Sqlcache_CacheList; -typedef struct uw_sqlcache_Cache { - uw_sqlcache_CacheEntry *table; +typedef struct uw_Sqlcache_Cache { + uw_Sqlcache_CacheEntry *table; time_t timeInvalid; - uw_sqlcache_CacheList *lru; + uw_Sqlcache_CacheList *lru; int height; -} uw_sqlcache_Cache; +} uw_Sqlcache_Cache; #endif diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index 3fac7041..05e3e4a0 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -406,8 +406,8 @@ void uw_Basis_writec(struct uw_context *, char); #include "uthash.h" -uw_sqlcache_CacheValue *uw_sqlcache_check(uw_sqlcache_Cache *, char **); -uw_sqlcache_CacheValue *uw_sqlcache_store(uw_sqlcache_Cache *, char **, uw_sqlcache_CacheValue *); -uw_sqlcache_CacheValue *uw_sqlcache_flush(uw_sqlcache_Cache *, char **); +uw_Sqlcache_CacheValue *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **); +uw_Sqlcache_CacheValue *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, uw_Sqlcache_CacheValue *); +uw_Sqlcache_CacheValue *uw_Sqlcache_flush(uw_Sqlcache_Cache *, char **); #endif diff --git a/src/c/openssl.c b/src/c/openssl.c index 6d018707..533c3e21 100644 --- a/src/c/openssl.c +++ b/src/c/openssl.c @@ -35,7 +35,7 @@ static void random_password() { // OpenSSL callbacks static void thread_id(CRYPTO_THREADID *const result) { - CRYPTO_THREADID_set_numeric(result, pthread_self()); + CRYPTO_THREADID_set_numeric(result, (unsigned long)pthread_self()); } static void lock_or_unlock(const int mode, const int type, const char *file, const int line) { @@ -73,7 +73,7 @@ void uw_init_crypto() { if (access(uw_sig_file, F_OK)) { random_password(); - + if ((fd = open(uw_sig_file, O_WRONLY | O_CREAT, 0700)) < 0) { fprintf(stderr, "Can't open signature file %s\n", uw_sig_file); perror("open"); diff --git a/src/c/urweb.c b/src/c/urweb.c index 66fedfa2..61742693 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4498,7 +4498,7 @@ void uw_set_remoteSock(uw_context ctx, int sock) { // Sqlcache -void uw_sqlcache_listDelete(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *entry) { +void uw_Sqlcache_listDelete(uw_Sqlcache_CacheList *list, uw_Sqlcache_CacheEntry *entry) { if (list->first == entry) { list->first = entry->next; } @@ -4516,7 +4516,7 @@ void 
uw_sqlcache_listDelete(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry --(list->size); } -void uw_sqlcache_listAdd(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *entry) { +void uw_Sqlcache_listAdd(uw_Sqlcache_CacheList *list, uw_Sqlcache_CacheEntry *entry) { if (list->last) { list->last->next = entry; entry->prev = list->last; @@ -4528,22 +4528,22 @@ void uw_sqlcache_listAdd(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *en ++(list->size); } -void uw_sqlcache_listBump(uw_sqlcache_CacheList *list, uw_sqlcache_CacheEntry *entry) { - uw_sqlcache_listDelete(list, entry); - uw_sqlcache_listAdd(list, entry); +void uw_Sqlcache_listBump(uw_Sqlcache_CacheList *list, uw_Sqlcache_CacheEntry *entry) { + uw_Sqlcache_listDelete(list, entry); + uw_Sqlcache_listAdd(list, entry); } // TODO: deal with time properly. -time_t uw_sqlcache_getTimeNow() { +time_t uw_Sqlcache_getTimeNow() { return time(NULL); } -time_t uw_sqlcache_timeMax(time_t x, time_t y) { +time_t uw_Sqlcache_timeMax(time_t x, time_t y) { return difftime(x, y) > 0 ? x : y; } -void uw_sqlcache_freeuw_sqlcache_CacheValue(uw_sqlcache_CacheValue *value) { +void uw_Sqlcache_freeuw_Sqlcache_CacheValue(uw_Sqlcache_CacheValue *value) { if (value) { free(value->result); free(value->output); @@ -4551,83 +4551,83 @@ void uw_sqlcache_freeuw_sqlcache_CacheValue(uw_sqlcache_CacheValue *value) { } } -void uw_sqlcache_delete(uw_sqlcache_Cache *cache, uw_sqlcache_CacheEntry* entry) { - //uw_sqlcache_listUw_Sqlcache_Delete(cache->lru, entry); +void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_CacheEntry* entry) { + //uw_Sqlcache_listUw_Sqlcache_Delete(cache->lru, entry); HASH_DELETE(hh, cache->table, entry); - uw_sqlcache_freeuw_sqlcache_CacheValue(entry->value); + uw_Sqlcache_freeuw_Sqlcache_CacheValue(entry->value); free(entry->key); free(entry); } -uw_sqlcache_CacheValue *uw_sqlcache_checkHelper(uw_sqlcache_Cache *cache, char **keys, int timeInvalid) { +uw_Sqlcache_CacheValue *uw_Sqlcache_checkHelper(uw_Sqlcache_Cache *cache, char **keys, int timeInvalid) { char *key = keys[cache->height]; - uw_sqlcache_CacheEntry *entry; + uw_Sqlcache_CacheEntry *entry; HASH_FIND(hh, cache->table, key, strlen(key), entry); - timeInvalid = uw_sqlcache_timeMax(timeInvalid, cache->timeInvalid); + timeInvalid = uw_Sqlcache_timeMax(timeInvalid, cache->timeInvalid); if (entry && difftime(entry->timeValid, timeInvalid) > 0) { if (cache->height == 0) { // At height 0, entry->value is the desired value. - //uw_sqlcache_listBump(cache->lru, entry); + //uw_Sqlcache_listBump(cache->lru, entry); return entry->value; } else { // At height n+1, entry->value is a pointer to a cache at heignt n. 
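As the comment above says, the cache is a trie of hash tables: the key array supplies one component per level, each level's timeInvalid is folded in with timeMax, and a hit at height 0 yields the stored value while higher levels recurse into a nested cache. A rough Standard ML model of that lookup shape is sketched below; the names, list-based table, and integer timestamps are illustrative assumptions, not the C runtime's uthash-based API.

(* One cache level: entries keyed by strings, plus the level's own
   invalidation time.  At the bottom an entry holds a value; above that it
   holds the next level down. *)
datatype 'a node = Value of 'a | Deeper of 'a cache
and 'a cache = Cache of {timeInvalid : int,
                         entries : (string * {timeValid : int, node : 'a node}) list}

(* Walk one key per level, accumulating invalidation times the way
   uw_Sqlcache_checkHelper accumulates them with timeMax. *)
fun check (Cache {timeInvalid, entries}) keys timeInvalidAcc =
    let
        val timeInvalidAcc = Int.max (timeInvalidAcc, timeInvalid)
    in
        case keys of
            [] => NONE
          | key :: rest =>
            case List.find (fn (k, _) => k = key) entries of
                NONE => NONE
              | SOME (_, {timeValid, node}) =>
                if timeValid <= timeInvalidAcc then NONE
                else case node of
                         Value v => SOME v
                       | Deeper c => check c rest timeInvalidAcc
    end

An entry is only a hit if its timeValid is newer than every invalidation time seen on the path down, which is what lets a flush at an outer level shadow older entries at inner levels without touching them.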
- return uw_sqlcache_checkHelper(entry->value, keys, timeInvalid); + return uw_Sqlcache_checkHelper(entry->value, keys, timeInvalid); } } else { return NULL; } } -uw_sqlcache_CacheValue *uw_sqlcache_check(uw_sqlcache_Cache *cache, char **keys) { - return uw_sqlcache_checkHelper(cache, keys, 0); +uw_Sqlcache_CacheValue *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { + return uw_Sqlcache_checkHelper(cache, keys, 0); } -void uw_sqlcache_storeHelper(uw_sqlcache_Cache *cache, char **keys, uw_sqlcache_CacheValue *value, int timeNow) { - uw_sqlcache_CacheEntry *entry; +void uw_Sqlcache_storeHelper(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_CacheValue *value, int timeNow) { + uw_Sqlcache_CacheEntry *entry; char *key = keys[cache->height]; HASH_FIND(hh, cache->table, key, strlen(key), entry); if (!entry) { - entry = malloc(sizeof(uw_sqlcache_CacheEntry)); + entry = malloc(sizeof(uw_Sqlcache_CacheEntry)); entry->key = strdup(key); entry->value = NULL; HASH_ADD_KEYPTR(hh, cache->table, entry->key, strlen(entry->key), entry); } entry->timeValid = timeNow; if (cache->height == 0) { - //uw_sqlcache_listAdd(cache->lru, entry); - uw_sqlcache_freeuw_sqlcache_CacheValue(entry->value); + //uw_Sqlcache_listAdd(cache->lru, entry); + uw_Sqlcache_freeuw_Sqlcache_CacheValue(entry->value); entry->value = value; //if (cache->lru->size > MAX_SIZE) { - //uw_sqlcache_delete(cache, cache->lru->first); + //uw_Sqlcache_delete(cache, cache->lru->first); // TODO: return flushed value. //} } else { if (!entry->value) { - uw_sqlcache_Cache *newuw_sqlcache_Cache = malloc(sizeof(uw_sqlcache_Cache)); - newuw_sqlcache_Cache->table = NULL; - newuw_sqlcache_Cache->timeInvalid = timeNow; - newuw_sqlcache_Cache->lru = cache->lru; - newuw_sqlcache_Cache->height = cache->height - 1; - entry->value = newuw_sqlcache_Cache; + uw_Sqlcache_Cache *newuw_Sqlcache_Cache = malloc(sizeof(uw_Sqlcache_Cache)); + newuw_Sqlcache_Cache->table = NULL; + newuw_Sqlcache_Cache->timeInvalid = timeNow; + newuw_Sqlcache_Cache->lru = cache->lru; + newuw_Sqlcache_Cache->height = cache->height - 1; + entry->value = newuw_Sqlcache_Cache; } - uw_sqlcache_storeHelper(entry->value, keys, value, timeNow); + uw_Sqlcache_storeHelper(entry->value, keys, value, timeNow); } } -void uw_sqlcache_store(uw_sqlcache_Cache *cache, char **keys, uw_sqlcache_CacheValue *value) { - uw_sqlcache_storeHelper(cache, keys, value, uw_sqlcache_getTimeNow()); +void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_CacheValue *value) { + uw_Sqlcache_storeHelper(cache, keys, value, uw_Sqlcache_getTimeNow()); } -void uw_sqlcache_flushHelper(uw_sqlcache_Cache *cache, char **keys, int timeNow) { - uw_sqlcache_CacheEntry *entry; +void uw_Sqlcache_flushHelper(uw_Sqlcache_Cache *cache, char **keys, int timeNow) { + uw_Sqlcache_CacheEntry *entry; char *key = keys[cache->height]; if (key) { HASH_FIND(hh, cache->table, key, strlen(key), entry); if (entry) { if (cache->height == 0) { - uw_sqlcache_delete(cache, entry); + uw_Sqlcache_delete(cache, entry); } else { - uw_sqlcache_flushHelper(entry->value, keys, timeNow); + uw_Sqlcache_flushHelper(entry->value, keys, timeNow); } } } else { @@ -4636,6 +4636,6 @@ void uw_sqlcache_flushHelper(uw_sqlcache_Cache *cache, char **keys, int timeNow) } } -void uw_sqlcache_flush(uw_sqlcache_Cache *cache, char **keys) { - uw_sqlcache_flushHelper(cache, keys, uw_sqlcache_getTimeNow()); +void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys) { + uw_Sqlcache_flushHelper(cache, keys, uw_Sqlcache_getTimeNow()); } diff --git 
a/src/lru_cache.sml b/src/lru_cache.sml index 26590312..0030777f 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -64,7 +64,7 @@ fun setupQuery {index, params} = in Print.box - [string ("static uw_sqlcache_Cache cacheStruct" ^ i ^ " = {"), + [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), newline, string " .table = NULL,", newline, @@ -74,7 +74,7 @@ fun setupQuery {index, params} = newline, string (" .height = " ^ Int.toString (params - 1) ^ "};"), newline, - string ("static uw_sqlcache_Cache *cache" ^ i ^ " = &cacheStruct" ^ i ^ ";"), + string ("static uw_Sqlcache_Cache *cache" ^ i ^ " = &cacheStruct" ^ i ^ ";"), newline, newline, @@ -83,7 +83,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_sqlcache_CacheValue *v = uw_sqlcache_check(cache" ^ i ^ ", ks);"), + string (" uw_Sqlcache_CacheValue *v = uw_Sqlcache_check(cache" ^ i ^ ", ks);"), newline, string " if (v) {", newline, @@ -112,7 +112,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_sqlcache_CacheValue *v = malloc(sizeof(uw_sqlcache_CacheValue));"), + string (" uw_Sqlcache_CacheValue *v = malloc(sizeof(uw_Sqlcache_CacheValue));"), newline, string " v->result = strdup(s);", newline, @@ -120,7 +120,7 @@ fun setupQuery {index, params} = newline, string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), newline, - string (" uw_sqlcache_store(cache" ^ i ^ ", ks, v);"), + string (" uw_Sqlcache_store(cache" ^ i ^ ", ks, v);"), newline, string " return uw_unit_v;", newline, @@ -133,7 +133,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_sqlcache_flush(cache" ^ i ^ ", ks);"), + string (" uw_Sqlcache_flush(cache" ^ i ^ ", ks);"), newline, string " return uw_unit_v;", newline, diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 8fae15eb..8efe999c 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache (* DEBUG: add back :> SQLCACHE. *) = struct +structure Sqlcache :> SQLCACHE = struct open Mono @@ -9,6 +9,12 @@ structure SS = BinarySetFn(SK) structure SM = BinaryMapFn(SK) structure SIMM = MultimapFn(structure KeyMap = SM structure ValSet = IS) +fun iterate f n x = if n < 0 + then raise Fail "Can't iterate function negative number of times." + else if n = 0 + then x + else iterate f (n-1) (f x) + (* Filled in by [cacheWrap] during [Sqlcache]. *) val ffiInfo : {index : int, params : int} list ref = ref [] @@ -36,7 +42,7 @@ val ffiEffectful = "urlifyChannel_w"] in fn (m, f) => Settings.isEffectful (m, f) - andalso not (m = "Basis" andalso SS.member (fs, f)) + orelse not (m = "Basis" andalso SS.member (fs, f)) end val cache = ref LruCache.cache @@ -45,8 +51,8 @@ fun getCache () = !cache (* Used to have type context for local variables in MonoUtil functions. *) val doBind = - fn (ctx, MonoUtil.Exp.RelE (_, t)) => t :: ctx - | (ctx, _) => ctx + fn (env, MonoUtil.Exp.RelE (s, t)) => MonoEnv.pushERel env s t NONE + | (env, _) => env (*******************) @@ -59,12 +65,12 @@ fun effectful (effs : IS.set) = val isFunction = fn (TFun _, _) => true | _ => false - fun doExp (ctx, e) = + fun doExp (env, e) = case e of EPrim _ => false (* For now: variables of function type might be effectful, but others are fully evaluated and are therefore not effectful. 
*) - | ERel n => isFunction (List.nth (ctx, n)) + | ERel n => isFunction (#2 (MonoEnv.lookupERel env n)) | ENamed n => IS.member (effs, n) | EFfi (m, f) => ffiEffectful (m, f) | EFfiApp (m, f, _) => ffiEffectful (m, f) @@ -84,9 +90,8 @@ fun effectful (effs : IS.set) = | EWrite _ => false | ESeq _ => false | ELet _ => false - (* ASK: what should we do about closures? *) - | EClosure _ => false | EUnurlify _ => false + (* ASK: what should we do about closures? *) (* Everything else is some sort of effect. We could flip this and explicitly list bits of Mono that are effectful, but this is conservatively robust to future changes (however unlikely). *) @@ -99,7 +104,7 @@ fun effectful (effs : IS.set) = fun effectfulDecls (decls, _) = let fun doVal ((_, name, _, e, _), effs) = - if effectful effs [] e + if effectful effs MonoEnv.empty e then IS.add (effs, name) else effs val doDecl = @@ -362,9 +367,9 @@ structure ConflictMaps = struct val markDml : (Sql.cmp * Sql.sqexp * Sql.sqexp) formula -> (Sql.cmp * atomExp option * atomExp option) formula = mapFormula (toAtomExps DmlRel) + (* No eqs should have key conflicts because no variable is in two equivalence classes, so the [#1] could be [#2]. *) - val mergeEqs : (atomExp IntBinaryMap.map option list -> atomExp IntBinaryMap.map option) = List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) @@ -511,10 +516,10 @@ fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = fun fileMapfold doExp file start = case MonoUtil.File.mapfoldB {typ = Search.return2, - exp = fn ctx => fn e' => fn s => Search.Continue (doExp ctx e' s), + exp = fn env => fn e' => fn s => Search.Continue (doExp env e' s), decl = fn _ => Search.return2, bind = doBind} - [] file start of + MonoEnv.empty file start of Search.Continue x => x | Search.Return _ => raise Match @@ -556,8 +561,9 @@ fun factorOutNontrivial text = fun addChecking file = let - fun doExp ctx (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = + fun doExp env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = fn e' as EQuery {query = origQueryText, + (* ASK: could this get messed up by inlining? *) sqlcacheInfo = urlifiedRel0, state = resultTyp, initial, body, tables, exps} => @@ -581,10 +587,14 @@ fun addChecking file = fun guard b x = if b then x else NONE val effs = effectfulDecls file (* We use dummyTyp here. I think this is okay because databases - don't store (effectful) functions, but there could be some - corner case I missed. *) + don't store (effectful) functions, but perhaps there's some + pathalogical corner case missing.... *) fun safe bound = - not o effectful effs (List.tabulate (bound, fn _ => dummyTyp) @ ctx) + not + o effectful effs + (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) + bound + env) val attempt = (* Ziv misses Haskell's do notation.... 
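The bind and guard helpers defined just above give SML's option type a do-notation feel: each step either produces a value for the next step or short-circuits the whole chain to NONE. A small self-contained illustration with stand-in steps follows; the real attempt value chains query parsing, table extraction, and the safety checks.

fun bind x f = Option.mapPartial f x
fun guard b x = if b then x else NONE

(* Accept a string only if it names an even, non-negative integer;
   any failed step collapses the whole chain to NONE. *)
fun evenNat s =
    bind (Int.fromString s) (fn n =>
    guard (n >= 0) (
    guard (n mod 2 = 0) (
    SOME n)))

(* evenNat "42" = SOME 42;  evenNat "7" = NONE;  evenNat "x" = NONE *)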
*) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( @@ -602,7 +612,7 @@ fun addChecking file = end | e' => (e', queryInfo) in - fileMapfold (fn ctx => fn exp => fn state => doExp ctx state exp) + fileMapfold (fn env => fn exp => fn state => doExp env state exp) file (SIMM.empty, IM.empty, 0) end @@ -716,4 +726,134 @@ fun go file = file' end + +(**********************) +(* Mono Type Checking *) +(**********************) + +val typOfPrim = + fn Prim.Int _ => TFfi ("Basis", "int") + | Prim.Float _ => TFfi ("Basis", "int") + +fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = + fn EPrim p => SOME (TFfi ("Basis", case p of + Prim.Int _ => "int" + | Prim.Float _ => "double" + | Prim.String _ => "string" + | Prim.Char _ => "char"), + dummyLoc) + | ERel n => SOME (#2 (MonoEnv.lookupERel env n)) + | ENamed n => SOME (#2 (MonoEnv.lookupENamed env n)) + (* ASK: okay to make a new [ref] each time? *) + | ECon (dk, PConVar nCon, _) => + let + val (_, _, nData) = MonoEnv.lookupConstructor env nCon + val (_, cs) = MonoEnv.lookupDatatype env nData + in + SOME (TDatatype (nData, ref (dk, cs)), dummyLoc) + end + | ECon (_, PConFfi {mod = s, datatyp, ...}, _) => SOME (TFfi (s, datatyp), dummyLoc) + | ENone t => SOME (TOption t, dummyLoc) + | ESome (t, _) => SOME (TOption t, dummyLoc) + | EFfi _ => NONE + | EFfiApp _ => NONE + | EApp (e1, e2) => (case typOfExp env e1 of + SOME (TFun (_, t), _) => SOME t + | _ => NONE) + | EAbs (_, t1, t2, _) => SOME (TFun (t1, t2), dummyLoc) + (* ASK: is this right? *) + | EUnop (unop, e) => (case unop of + "!" => SOME (TFfi ("Basis", "bool"), dummyLoc) + | "-" => typOfExp env e + | _ => NONE) + (* ASK: how should this (and other "=> NONE" cases) work? *) + | EBinop _ => NONE + | ERecord fields => SOME (TRecord (map (fn (s, _, t) => (s, t)) fields), dummyLoc) + | EField (e, s) => (case typOfExp env e of + SOME (TRecord fields, _) => + (case List.find (fn (s', _) => s = s') fields of + SOME (_, t) => SOME t + | _ => NONE) + | _ => NONE) + | ECase (_, _, {result, ...}) => SOME result + | EStrcat _ => SOME (TFfi ("Basis", "string"), dummyLoc) + | EWrite _ => SOME (TRecord [], dummyLoc) + | ESeq (_, e) => typOfExp env e + | ELet (s, t, e1, e2) => typOfExp (MonoEnv.pushERel env s t (SOME e1)) e2 + | EClosure _ => NONE + | EUnurlify (_, t, _) => SOME t + +and typOfExp env (e', loc) = typOfExp' env e' + + +(*******************************) +(* Caching Pure Subexpressions *) +(*******************************) + +datatype subexp = Pure of unit -> exp | Impure of exp + +val isImpure = + fn Pure _ => false + | Impure _ => true + +val expOfSubexp = + fn Pure f => f () + | Impure e => e + +val makeCache : MonoEnv.env -> exp' -> exp' = fn _ => fn _ => raise Fail "TODO" + +fun pureCache (effs : IS.set) (env : MonoEnv.env) (exp as (exp', loc)) : subexp = + let + fun wrapBindN f (args : (MonoEnv.env * exp) list) = + let + val subexps = map (fn (env, exp) => pureCache effs env exp) args + in + if List.exists isImpure subexps + then Impure (f (map expOfSubexp subexps), loc) + else Pure (fn () => (makeCache env (f (map #2 args)), loc)) + end + fun wrapBind1 f arg = + wrapBindN (fn [arg] => f arg | _ => raise Match) [arg] + fun wrapBind2 f (arg1, arg2) = + wrapBindN (fn [arg1, arg2] => f (arg1, arg2) | _ => raise Match) [arg1, arg2] + fun wrapN f es = wrapBindN f (map (fn e => (env, e)) es) + fun wrap1 f e = wrapBind1 f (env, e) + fun wrap2 f (e1, e2) = wrapBind2 f ((env, e1), (env, e2)) + in + case exp' of + ECon (dk, pc, SOME e) => wrap1 (fn e => ECon (dk, pc, SOME 
e)) e + | ESome (t, e) => wrap1 (fn e => ESome (t, e)) e + | EFfiApp (s1, s2, args) => + wrapN (fn es => EFfiApp (s1, s2, ListPair.map (fn (e, (_, t)) => (e, t)) (es, args))) + (map #1 args) + | EApp (e1, e2) => wrap2 EApp (e1, e2) + | EAbs (s, t1, t2, e) => + wrapBind1 (fn e => EAbs (s, t1, t2, e)) + (MonoEnv.pushERel env s t1 NONE, e) + | EUnop (s, e) => wrap1 (fn e => EUnop (s, e)) e + | EBinop (bi, s, e1, e2) => wrap2 (fn (e1, e2) => EBinop (bi, s, e1, e2)) (e1, e2) + | ERecord fields => + wrapN (fn es => ERecord (ListPair.map (fn (e, (s, _, t)) => (s, e, t)) (es, fields))) + (map #2 fields) + | EField (e, s) => wrap1 (fn e => EField (e, s)) e + | ECase (e, cases, {disc, result}) => + wrapBindN (fn (e::es) => + ECase (e, + (ListPair.map (fn (e, (p, _)) => (p, e)) (es, cases)), + {disc = disc, result = result})) + ((env, e) :: map (fn (p, e) => (MonoEnv.patBinds env p, e)) cases) + | EStrcat (e1, e2) => wrap2 EStrcat (e1, e2) + (* We record page writes, so they're cachable. *) + | EWrite e => wrap1 EWrite e + | ESeq (e1, e2) => wrap2 ESeq (e1, e2) + | ELet (s, t, e1, e2) => + wrapBind2 (fn (e1, e2) => ELet (s, t, e1, e2)) + ((env, e1), (MonoEnv.pushERel env s t (SOME e1), e2)) + (* ASK: | EClosure (n, es) => ? *) + | EUnurlify (e, t, b) => wrap1 (fn e => EUnurlify (e, t, b)) e + | _ => if effectful effs env exp + then Impure exp + else Pure (fn () => (makeCache env exp', loc)) + end + end -- cgit v1.2.3 From 68879bbb4bf58e4709c96ba6904071ce5d24a906 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 13 Sep 2015 17:02:17 -0400 Subject: Make Mono.file a record for readability upon extension. --- src/cjrize.sml | 2 +- src/dbmodecheck.sml | 7 +++---- src/fuse.sml | 8 ++++---- src/iflow.sml | 4 ++-- src/jscomp.sml | 6 +++--- src/mono.sml | 2 +- src/mono_print.sml | 4 ++-- src/mono_reduce.sml | 8 ++++---- src/mono_shake.sml | 11 ++++++----- src/mono_util.sml | 16 ++++++++-------- src/monoize.sml | 2 +- src/name_js.sml | 12 ++++++------ src/pathcheck.sml | 6 +++--- src/scriptcheck.sml | 9 ++++----- src/sigcheck.sml | 8 ++++---- src/sqlcache.sml | 2 +- src/untangle.sml | 4 ++-- 17 files changed, 55 insertions(+), 56 deletions(-) (limited to 'src') diff --git a/src/cjrize.sml b/src/cjrize.sml index b20d6d22..870c66be 100644 --- a/src/cjrize.sml +++ b/src/cjrize.sml @@ -701,7 +701,7 @@ fun cifyDecl ((d, loc), sm) = | L.DPolicy _ => (NONE, NONE, sm) | L.DOnError n => (SOME (L'.DOnError n, loc), NONE, sm) -fun cjrize (ds, sideInfo) = +fun cjrize {decls = ds, sideInfo} = let val (dsF, ds, ps, sm) = foldl (fn (d, (dsF, ds, ps, sm)) => let diff --git a/src/dbmodecheck.sml b/src/dbmodecheck.sml index eb416cea..491927c0 100644 --- a/src/dbmodecheck.sml +++ b/src/dbmodecheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -31,7 +31,7 @@ open Mono structure IM = IntBinaryMap -fun classify (ds, ps) = +fun classify {decls = ds, sideInfo = ps} = let fun mergeModes (m1, m2) = case (m1, m2) of @@ -79,8 +79,7 @@ fun classify (ds, ps) = val ps = IM.foldli (fn (n, mode, ps) => (n, ServerOnly, mode) :: ps) ps modes in - (ds, ps) + {decls = ds, sideInfo = ps} end end - diff --git a/src/fuse.sml b/src/fuse.sml index 5193e59a..017f79d5 100644 --- a/src/fuse.sml +++ b/src/fuse.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -139,14 +139,14 @@ fun fuse file = in (U.Decl.map {typ = fn x => x, exp = exp, - decl = fn x => x} + decl = fn x => x} d, (funcs, maxName)) end - val (ds, _) = ListUtil.foldlMap doDecl (IM.empty, U.File.maxName file + 1) (#1 file) + val (ds, _) = ListUtil.foldlMap doDecl (IM.empty, U.File.maxName file + 1) (#decls file) in - (ds, #2 file) + {decls = ds, sideInfo = #sideInfo file} end end diff --git a/src/iflow.sml b/src/iflow.sml index b8346baa..6ed7e69d 100644 --- a/src/iflow.sml +++ b/src/iflow.sml @@ -1900,7 +1900,7 @@ fun check (file : file) = val exptd = foldl (fn ((d, _), exptd) => case d of DExport (_, _, n, _, _, _) => IS.add (exptd, n) - | _ => exptd) IS.empty (#1 file) + | _ => exptd) IS.empty (#decls file) fun decl (d, loc) = case d of @@ -2164,7 +2164,7 @@ fun check (file : file) = | _ => () in - app decl (#1 file) + app decl (#decls file) end val check = fn file => diff --git a/src/jscomp.sml b/src/jscomp.sml index e5f7d234..29b11820 100644 --- a/src/jscomp.sml +++ b/src/jscomp.sml @@ -79,7 +79,7 @@ fun process (file : file) = someTs) someTs dts, nameds) | (_, state) => state) - (IM.empty, IM.empty) (#1 file) + (IM.empty, IM.empty) (#decls file) fun str loc s = (EPrim (Prim.String (Prim.Normal, s)), loc) @@ -1335,7 +1335,7 @@ fun process (file : file) = listInjectors = TM.empty, decoders = IM.empty, maxName = U.File.maxName file + 1} - (#1 file) + (#decls file) val inf = TextIO.openIn (OS.Path.joinDirFile {dir = Settings.libJs (), file = "urweb.js"}) fun lines acc = @@ -1365,7 +1365,7 @@ fun process (file : file) = "" in TextIO.closeIn inf; - ((DJavaScript script, ErrorMsg.dummySpan) :: ds, #2 file) + {decls = (DJavaScript script, ErrorMsg.dummySpan) :: ds, sideInfo = #sideInfo file} end end diff --git a/src/mono.sml b/src/mono.sml index 5185e48c..94c47814 100644 --- a/src/mono.sml +++ b/src/mono.sml @@ -168,6 +168,6 @@ datatype dbmode = | OneQuery | AnyDb -type file = decl list * (int * sidedness * dbmode) list +type file = {decls : decl list, sideInfo : (int * sidedness * dbmode) list} end diff --git 
a/src/mono_print.sml b/src/mono_print.sml index 0ff51f37..0b5fdadc 100644 --- a/src/mono_print.sml +++ b/src/mono_print.sml @@ -542,12 +542,12 @@ fun p_decl env (dAll as (d, _) : decl) = p_policy env p] | DOnError _ => string "ONERROR" -fun p_file env (file, _) = +fun p_file env (file : file) = let val (pds, _) = ListUtil.foldlMap (fn (d, env) => (p_decl env d, E.declBinds env d)) - env file + env (#decls file) in p_list_sep newline (fn x => x) pds end diff --git a/src/mono_reduce.sml b/src/mono_reduce.sml index 61866af7..19c07f12 100644 --- a/src/mono_reduce.sml +++ b/src/mono_reduce.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -390,7 +390,7 @@ fun reduce' (file : file) = absCounts vis) | _ => (timpures, impures, absCounts) end) - (IS.empty, IS.empty, IM.empty) (#1 file) + (IS.empty, IS.empty, IM.empty) (#decls file) val uses = U.File.fold {typ = fn (_, m) => m, exp = fn (e, m) => @@ -406,7 +406,7 @@ fun reduce' (file : file) = val functionInside' = U.Typ.exists (fn c => case c of TFun _ => true | _ => false) - + fun functionInside t = case #1 t of TFun (t1, t2) => functionInside' t1 orelse functionInside t2 @@ -520,7 +520,7 @@ fun reduce' (file : file) = | ERedirect (e, _) => summarize d e @ [Abort] | EWrite e => summarize d e @ [WritePage] - + | ESeq (e1, e2) => summarize d e1 @ summarize d e2 | ELet (_, _, e1, e2) => summarize d e1 @ summarize (if d = ~1 then ~1 else d + 1) e2 diff --git a/src/mono_shake.sml b/src/mono_shake.sml index 5818fea0..b394af5b 100644 --- a/src/mono_shake.sml +++ b/src/mono_shake.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -60,7 +60,7 @@ fun shake (file : file) = | ((DTask _, _), acc) => acc | ((DPolicy _, _), acc) => acc | ((DOnError _, _), acc) => acc) - (IM.empty, IM.empty) (#1 file) + (IM.empty, IM.empty) (#decls file) fun typ (c, s) = case c of @@ -130,7 +130,7 @@ fun shake (file : file) = usedVars st e1 end | ((DOnError n, _), (page_cs, page_es)) => (page_cs, IS.add (page_es, n)) - | (_, st) => st) (IS.empty, IS.empty) (#1 file) + | (_, st) => st) (IS.empty, IS.empty) (#decls file) val s = {con = page_cs, exp = page_es} @@ -145,7 +145,8 @@ fun shake (file : file) = NONE => raise Fail "MonoShake: Couldn't find 'val'" | SOME (t, e) => shakeExp s e) s page_es in - (List.filter (fn (DDatatype dts, _) => List.exists (fn (_, n, _) => IS.member (#con s, n)) dts + {decls = + List.filter (fn (DDatatype dts, _) => List.exists (fn (_, n, _) => IS.member (#con s, n)) dts | (DVal (_, n, _, _, _), _) => IS.member (#exp s, n) | (DValRec vis, _) => List.exists (fn (_, n, _, _, _) => IS.member (#exp s, n)) vis | (DExport _, _) => true @@ -158,7 +159,7 @@ fun shake (file : file) = | (DStyle _, _) => true | (DTask _, _) => true | (DPolicy _, _) => true - | (DOnError _, _) => true) (#1 file), #2 file) + | (DOnError _, _) => true) (#decls file), sideInfo = #sideInfo file} end end diff --git a/src/mono_util.sml b/src/mono_util.sml index ba10ad32..64aeb318 100644 --- a/src/mono_util.sml +++ b/src/mono_util.sml @@ -695,9 +695,9 @@ fun mapfoldB (all as {bind, ...}) = let val mfd = Decl.mapfoldB all - fun mff ctx (ds, ps) = - case ds of - nil => S.return2 (nil, ps) + fun mff ctx (file : file) = + case #decls file of + nil => S.return2 {decls = nil, sideInfo = #sideInfo file} | d :: ds' => S.bind2 (mfd ctx d, fn d' => @@ -736,9 +736,9 @@ fun mapfoldB (all as {bind, ...}) = | DPolicy _ => ctx | DOnError _ => ctx in - S.map2 (mff ctx' (ds', ps), - fn (ds', _) => - (d' :: ds', ps)) + S.map2 (mff ctx' {decls = ds', sideInfo = #sideInfo file}, + fn {decls = ds', ...} => + {decls = d' :: ds', sideInfo = #sideInfo file}) end) in mff @@ -791,7 +791,7 @@ fun maxName (f : file) = | DStyle _ => count | DTask _ => count | DPolicy _ => count - | DOnError _ => count) 0 (#1 f) + | DOnError _ => count) 0 (#decls f) fun appLoc f (fl : file) = let @@ -822,7 +822,7 @@ fun appLoc f (fl : file) = | PolUpdate e1 => eal e1 | PolSequence e1 => eal e1 in - app appl (#1 fl) + app appl (#decls fl) end end diff --git a/src/monoize.sml b/src/monoize.sml index d8c4d276..d0b93c50 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -4786,7 +4786,7 @@ fun monoize env file = pvars := RM.empty; pvarDefs := []; pvarOldDefs := []; - (rev ds, []) + {decls = rev ds, sideInfo = []} end end diff --git a/src/name_js.sml b/src/name_js.sml index f10e5938..b838d1d3 100644 --- a/src/name_js.sml +++ b/src/name_js.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -92,7 +92,7 @@ fun rewrite file = | DValRec vis => foldl (fn ((_, n, _, _, _), dontName) => IS.add (dontName, n)) dontName vis | _ => dontName else - dontName) IS.empty (#1 file) + dontName) IS.empty (#decls file) val (ds, _) = ListUtil.foldlMapConcat (fn (d, nextName) => let @@ -126,9 +126,9 @@ fun rewrite file = val vs = freeVars e' val vs = IS.listItems vs - + val x = "script" ^ Int.toString nextName - + val un = (TRecord [], loc) val s = (TFfi ("Basis", "string"), loc) val base = (TFun (un, s), loc) @@ -165,9 +165,9 @@ fun rewrite file = DValRec vis => [(DValRec (vis @ newDs), #2 d)] | _ => List.revAppend (map (fn vi => (DVal vi, #2 d)) newDs, [d]), nextName) - end) (U.File.maxName file + 1) (#1 file) + end) (U.File.maxName file + 1) (#decls file) in - (ds, #2 file) + {decls = ds, sideInfo = #sideInfo file} end end diff --git a/src/pathcheck.sml b/src/pathcheck.sml index 3533032e..2de3b544 100644 --- a/src/pathcheck.sml +++ b/src/pathcheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -68,7 +68,7 @@ fun checkDecl ((d, loc), (funcs, rels, cookies, styles)) = in case d of DExport (_, s, _, _, _, _) => doFunc s - + | DTable (s, _, pe, ce) => let fun constraints (e, rels) = @@ -110,6 +110,6 @@ fun checkDecl ((d, loc), (funcs, rels, cookies, styles)) = | _ => (funcs, rels, cookies, styles) end -fun check (ds, _) = ignore (foldl checkDecl (SS.empty, SS.empty, SS.empty, SS.empty) ds) +fun check (file : file) = ignore (foldl checkDecl (SS.empty, SS.empty, SS.empty, SS.empty) (#decls file)) end diff --git a/src/scriptcheck.sml b/src/scriptcheck.sml index 0d30ebcb..d1e893dd 100644 --- a/src/scriptcheck.sml +++ b/src/scriptcheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -91,7 +91,7 @@ fun dump (r : rpcmap) = dump r')) m; print "\n") -fun classify (ds, ps) = +fun classify {decls = ds, sideInfo = ps} = let val proto = Settings.currentProtocol () @@ -100,7 +100,7 @@ fun classify (ds, ps) = fun hasClient {basis, rpcs, funcs, push} = MonoUtil.Exp.exists {typ = fn _ => false, exp = fn ERecv _ => push - | EFfiApp ("Basis", x, _) => SS.member (basis, x) + | EFfiApp ("Basis", x, _) => SS.member (basis, x) | EJavaScript _ => not push | ENamed n => IS.member (funcs, n) | EServerCall (e, _, _, _) => @@ -175,8 +175,7 @@ fun classify (ds, ps) = else ServerOnly, AnyDb)) (IS.listItems all_ids) in - (ds, ps) + {decls = ds, sideInfo = ps} end end - diff --git a/src/sigcheck.sml b/src/sigcheck.sml index a6ed7653..35302bae 100644 --- a/src/sigcheck.sml +++ b/src/sigcheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -32,7 +32,7 @@ open Mono structure IS = IntBinarySet structure E = ErrorMsg -fun check (ds, sl) = +fun check (file : file) = let fun isSiggy siggers = MonoUtil.Decl.exists {typ = fn _ => false, @@ -89,9 +89,9 @@ fun check (ds, sl) = (sigify sigdecs d, (siggers, sigdecs)) | _ => (sigify sigdecs d, (siggers, sigdecs)) - val (ds, _) = ListUtil.foldlMap doDecl (IS.empty, IS.empty) ds + val (ds, _) = ListUtil.foldlMap doDecl (IS.empty, IS.empty) (#decls file) in - (ds, sl) + {decls = ds, sideInfo = #sideInfo file} end end diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 8efe999c..40081351 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -101,7 +101,7 @@ fun effectful (effs : IS.set) = end (* TODO: test this. *) -fun effectfulDecls (decls, _) = +fun effectfulDecls ({decls, ...} : file) = let fun doVal ((_, name, _, e, _), effs) = if effectful effs MonoEnv.empty e diff --git a/src/untangle.sml b/src/untangle.sml index bcb90ed6..8ed9c8f6 100644 --- a/src/untangle.sml +++ b/src/untangle.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -208,7 +208,7 @@ fun untangle (file : file) = end | _ => [dAll] in - (ListUtil.mapConcat decl (#1 file), #2 file) + {decls = ListUtil.mapConcat decl (#decls file), sideInfo = #sideInfo file} end end -- cgit v1.2.3 From 3e42cccfaef1157ca14cd102959b867c996503a9 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 21 Sep 2015 10:16:55 -0400 Subject: Revert to revision 2222. --- src/cjrize.sml | 2 +- src/dbmodecheck.sml | 7 ++++--- src/fuse.sml | 8 ++++---- src/iflow.sml | 4 ++-- src/jscomp.sml | 6 +++--- src/mono.sml | 2 +- src/mono_print.sml | 4 ++-- src/mono_reduce.sml | 8 ++++---- src/mono_shake.sml | 11 +++++------ src/mono_util.sml | 16 ++++++++-------- src/monoize.sml | 2 +- src/name_js.sml | 12 ++++++------ src/pathcheck.sml | 6 +++--- src/scriptcheck.sml | 9 +++++---- src/sigcheck.sml | 8 ++++---- src/sqlcache.sml | 2 +- src/untangle.sml | 4 ++-- 17 files changed, 56 insertions(+), 55 deletions(-) (limited to 'src') diff --git a/src/cjrize.sml b/src/cjrize.sml index 870c66be..b20d6d22 100644 --- a/src/cjrize.sml +++ b/src/cjrize.sml @@ -701,7 +701,7 @@ fun cifyDecl ((d, loc), sm) = | L.DPolicy _ => (NONE, NONE, sm) | L.DOnError n => (SOME (L'.DOnError n, loc), NONE, sm) -fun cjrize {decls = ds, sideInfo} = +fun cjrize (ds, sideInfo) = let val (dsF, ds, ps, sm) = foldl (fn (d, (dsF, ds, ps, sm)) => let diff --git a/src/dbmodecheck.sml b/src/dbmodecheck.sml index 491927c0..eb416cea 100644 --- a/src/dbmodecheck.sml +++ b/src/dbmodecheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -31,7 +31,7 @@ open Mono structure IM = IntBinaryMap -fun classify {decls = ds, sideInfo = ps} = +fun classify (ds, ps) = let fun mergeModes (m1, m2) = case (m1, m2) of @@ -79,7 +79,8 @@ fun classify {decls = ds, sideInfo = ps} = val ps = IM.foldli (fn (n, mode, ps) => (n, ServerOnly, mode) :: ps) ps modes in - {decls = ds, sideInfo = ps} + (ds, ps) end end + diff --git a/src/fuse.sml b/src/fuse.sml index 017f79d5..5193e59a 100644 --- a/src/fuse.sml +++ b/src/fuse.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -139,14 +139,14 @@ fun fuse file = in (U.Decl.map {typ = fn x => x, exp = exp, - decl = fn x => x} + decl = fn x => x} d, (funcs, maxName)) end - val (ds, _) = ListUtil.foldlMap doDecl (IM.empty, U.File.maxName file + 1) (#decls file) + val (ds, _) = ListUtil.foldlMap doDecl (IM.empty, U.File.maxName file + 1) (#1 file) in - {decls = ds, sideInfo = #sideInfo file} + (ds, #2 file) end end diff --git a/src/iflow.sml b/src/iflow.sml index 6ed7e69d..b8346baa 100644 --- a/src/iflow.sml +++ b/src/iflow.sml @@ -1900,7 +1900,7 @@ fun check (file : file) = val exptd = foldl (fn ((d, _), exptd) => case d of DExport (_, _, n, _, _, _) => IS.add (exptd, n) - | _ => exptd) IS.empty (#decls file) + | _ => exptd) IS.empty (#1 file) fun decl (d, loc) = case d of @@ -2164,7 +2164,7 @@ fun check (file : file) = | _ => () in - app decl (#decls file) + app decl (#1 file) end val check = fn file => diff --git a/src/jscomp.sml b/src/jscomp.sml index 29b11820..e5f7d234 100644 --- a/src/jscomp.sml +++ b/src/jscomp.sml @@ -79,7 +79,7 @@ fun process (file : file) = someTs) someTs dts, nameds) | (_, state) => state) - (IM.empty, IM.empty) (#decls file) + (IM.empty, IM.empty) (#1 file) fun str loc s = (EPrim (Prim.String (Prim.Normal, s)), loc) @@ -1335,7 +1335,7 @@ fun process (file : file) = listInjectors = TM.empty, decoders = IM.empty, maxName = U.File.maxName file + 1} - (#decls file) + (#1 file) val inf = TextIO.openIn (OS.Path.joinDirFile {dir = Settings.libJs (), file = "urweb.js"}) fun lines acc = @@ -1365,7 +1365,7 @@ fun process (file : file) = "" in TextIO.closeIn inf; - {decls = (DJavaScript script, ErrorMsg.dummySpan) :: ds, sideInfo = #sideInfo file} + ((DJavaScript script, ErrorMsg.dummySpan) :: ds, #2 file) end end diff --git a/src/mono.sml b/src/mono.sml index 94c47814..5185e48c 100644 --- a/src/mono.sml +++ b/src/mono.sml @@ -168,6 +168,6 @@ datatype dbmode = | OneQuery | AnyDb -type file = {decls : decl list, sideInfo : (int * sidedness * dbmode) list} +type file = decl list * (int * sidedness * dbmode) list end diff --git a/src/mono_print.sml b/src/mono_print.sml index 0b5fdadc..0ff51f37 100644 --- a/src/mono_print.sml +++ b/src/mono_print.sml @@ -542,12 +542,12 @@ fun p_decl env (dAll as (d, _) : decl) = p_policy env p] | DOnError _ => string "ONERROR" -fun p_file env (file : file) = +fun p_file env (file, _) = let val (pds, _) = ListUtil.foldlMap (fn (d, env) => (p_decl env d, E.declBinds env d)) - env (#decls file) + env file in p_list_sep newline (fn x => x) pds end diff --git a/src/mono_reduce.sml b/src/mono_reduce.sml index 19c07f12..61866af7 100644 --- a/src/mono_reduce.sml +++ b/src/mono_reduce.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -390,7 +390,7 @@ fun reduce' (file : file) = absCounts vis) | _ => (timpures, impures, absCounts) end) - (IS.empty, IS.empty, IM.empty) (#decls file) + (IS.empty, IS.empty, IM.empty) (#1 file) val uses = U.File.fold {typ = fn (_, m) => m, exp = fn (e, m) => @@ -406,7 +406,7 @@ fun reduce' (file : file) = val functionInside' = U.Typ.exists (fn c => case c of TFun _ => true | _ => false) - + fun functionInside t = case #1 t of TFun (t1, t2) => functionInside' t1 orelse functionInside t2 @@ -520,7 +520,7 @@ fun reduce' (file : file) = | ERedirect (e, _) => summarize d e @ [Abort] | EWrite e => summarize d e @ [WritePage] - + | ESeq (e1, e2) => summarize d e1 @ summarize d e2 | ELet (_, _, e1, e2) => summarize d e1 @ summarize (if d = ~1 then ~1 else d + 1) e2 diff --git a/src/mono_shake.sml b/src/mono_shake.sml index b394af5b..5818fea0 100644 --- a/src/mono_shake.sml +++ b/src/mono_shake.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -60,7 +60,7 @@ fun shake (file : file) = | ((DTask _, _), acc) => acc | ((DPolicy _, _), acc) => acc | ((DOnError _, _), acc) => acc) - (IM.empty, IM.empty) (#decls file) + (IM.empty, IM.empty) (#1 file) fun typ (c, s) = case c of @@ -130,7 +130,7 @@ fun shake (file : file) = usedVars st e1 end | ((DOnError n, _), (page_cs, page_es)) => (page_cs, IS.add (page_es, n)) - | (_, st) => st) (IS.empty, IS.empty) (#decls file) + | (_, st) => st) (IS.empty, IS.empty) (#1 file) val s = {con = page_cs, exp = page_es} @@ -145,8 +145,7 @@ fun shake (file : file) = NONE => raise Fail "MonoShake: Couldn't find 'val'" | SOME (t, e) => shakeExp s e) s page_es in - {decls = - List.filter (fn (DDatatype dts, _) => List.exists (fn (_, n, _) => IS.member (#con s, n)) dts + (List.filter (fn (DDatatype dts, _) => List.exists (fn (_, n, _) => IS.member (#con s, n)) dts | (DVal (_, n, _, _, _), _) => IS.member (#exp s, n) | (DValRec vis, _) => List.exists (fn (_, n, _, _, _) => IS.member (#exp s, n)) vis | (DExport _, _) => true @@ -159,7 +158,7 @@ fun shake (file : file) = | (DStyle _, _) => true | (DTask _, _) => true | (DPolicy _, _) => true - | (DOnError _, _) => true) (#decls file), sideInfo = #sideInfo file} + | (DOnError _, _) => true) (#1 file), #2 file) end end diff --git a/src/mono_util.sml b/src/mono_util.sml index 64aeb318..ba10ad32 100644 --- a/src/mono_util.sml +++ b/src/mono_util.sml @@ -695,9 +695,9 @@ fun mapfoldB (all as {bind, ...}) = let val mfd = Decl.mapfoldB all - fun mff ctx (file : file) = - case #decls file of - nil => S.return2 {decls = nil, sideInfo = #sideInfo file} + fun mff ctx 
(ds, ps) = + case ds of + nil => S.return2 (nil, ps) | d :: ds' => S.bind2 (mfd ctx d, fn d' => @@ -736,9 +736,9 @@ fun mapfoldB (all as {bind, ...}) = | DPolicy _ => ctx | DOnError _ => ctx in - S.map2 (mff ctx' {decls = ds', sideInfo = #sideInfo file}, - fn {decls = ds', ...} => - {decls = d' :: ds', sideInfo = #sideInfo file}) + S.map2 (mff ctx' (ds', ps), + fn (ds', _) => + (d' :: ds', ps)) end) in mff @@ -791,7 +791,7 @@ fun maxName (f : file) = | DStyle _ => count | DTask _ => count | DPolicy _ => count - | DOnError _ => count) 0 (#decls f) + | DOnError _ => count) 0 (#1 f) fun appLoc f (fl : file) = let @@ -822,7 +822,7 @@ fun appLoc f (fl : file) = | PolUpdate e1 => eal e1 | PolSequence e1 => eal e1 in - app appl (#decls fl) + app appl (#1 fl) end end diff --git a/src/monoize.sml b/src/monoize.sml index d0b93c50..d8c4d276 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -4786,7 +4786,7 @@ fun monoize env file = pvars := RM.empty; pvarDefs := []; pvarOldDefs := []; - {decls = rev ds, sideInfo = []} + (rev ds, []) end end diff --git a/src/name_js.sml b/src/name_js.sml index b838d1d3..f10e5938 100644 --- a/src/name_js.sml +++ b/src/name_js.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -92,7 +92,7 @@ fun rewrite file = | DValRec vis => foldl (fn ((_, n, _, _, _), dontName) => IS.add (dontName, n)) dontName vis | _ => dontName else - dontName) IS.empty (#decls file) + dontName) IS.empty (#1 file) val (ds, _) = ListUtil.foldlMapConcat (fn (d, nextName) => let @@ -126,9 +126,9 @@ fun rewrite file = val vs = freeVars e' val vs = IS.listItems vs - + val x = "script" ^ Int.toString nextName - + val un = (TRecord [], loc) val s = (TFfi ("Basis", "string"), loc) val base = (TFun (un, s), loc) @@ -165,9 +165,9 @@ fun rewrite file = DValRec vis => [(DValRec (vis @ newDs), #2 d)] | _ => List.revAppend (map (fn vi => (DVal vi, #2 d)) newDs, [d]), nextName) - end) (U.File.maxName file + 1) (#decls file) + end) (U.File.maxName file + 1) (#1 file) in - {decls = ds, sideInfo = #sideInfo file} + (ds, #2 file) end end diff --git a/src/pathcheck.sml b/src/pathcheck.sml index 2de3b544..3533032e 100644 --- a/src/pathcheck.sml +++ b/src/pathcheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -68,7 +68,7 @@ fun checkDecl ((d, loc), (funcs, rels, cookies, styles)) = in case d of DExport (_, s, _, _, _, _) => doFunc s - + | DTable (s, _, pe, ce) => let fun constraints (e, rels) = @@ -110,6 +110,6 @@ fun checkDecl ((d, loc), (funcs, rels, cookies, styles)) = | _ => (funcs, rels, cookies, styles) end -fun check (file : file) = ignore (foldl checkDecl (SS.empty, SS.empty, SS.empty, SS.empty) (#decls file)) +fun check (ds, _) = ignore (foldl checkDecl (SS.empty, SS.empty, SS.empty, SS.empty) ds) end diff --git a/src/scriptcheck.sml b/src/scriptcheck.sml index d1e893dd..0d30ebcb 100644 --- a/src/scriptcheck.sml +++ b/src/scriptcheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -91,7 +91,7 @@ fun dump (r : rpcmap) = dump r')) m; print "\n") -fun classify {decls = ds, sideInfo = ps} = +fun classify (ds, ps) = let val proto = Settings.currentProtocol () @@ -100,7 +100,7 @@ fun classify {decls = ds, sideInfo = ps} = fun hasClient {basis, rpcs, funcs, push} = MonoUtil.Exp.exists {typ = fn _ => false, exp = fn ERecv _ => push - | EFfiApp ("Basis", x, _) => SS.member (basis, x) + | EFfiApp ("Basis", x, _) => SS.member (basis, x) | EJavaScript _ => not push | ENamed n => IS.member (funcs, n) | EServerCall (e, _, _, _) => @@ -175,7 +175,8 @@ fun classify {decls = ds, sideInfo = ps} = else ServerOnly, AnyDb)) (IS.listItems all_ids) in - {decls = ds, sideInfo = ps} + (ds, ps) end end + diff --git a/src/sigcheck.sml b/src/sigcheck.sml index 35302bae..a6ed7653 100644 --- a/src/sigcheck.sml +++ b/src/sigcheck.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -32,7 +32,7 @@ open Mono structure IS = IntBinarySet structure E = ErrorMsg -fun check (file : file) = +fun check (ds, sl) = let fun isSiggy siggers = MonoUtil.Decl.exists {typ = fn _ => false, @@ -89,9 +89,9 @@ fun check (file : file) = (sigify sigdecs d, (siggers, sigdecs)) | _ => (sigify sigdecs d, (siggers, sigdecs)) - val (ds, _) = ListUtil.foldlMap doDecl (IS.empty, IS.empty) (#decls file) + val (ds, _) = ListUtil.foldlMap doDecl (IS.empty, IS.empty) ds in - {decls = ds, sideInfo = #sideInfo file} + (ds, sl) end end diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 40081351..8efe999c 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -101,7 +101,7 @@ fun effectful (effs : IS.set) = end (* TODO: test this. *) -fun effectfulDecls ({decls, ...} : file) = +fun effectfulDecls (decls, _) = let fun doVal ((_, name, _, e, _), effs) = if effectful effs MonoEnv.empty e diff --git a/src/untangle.sml b/src/untangle.sml index 8ed9c8f6..bcb90ed6 100644 --- a/src/untangle.sml +++ b/src/untangle.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -208,7 +208,7 @@ fun untangle (file : file) = end | _ => [dAll] in - {decls = ListUtil.mapConcat decl (#decls file), sideInfo = #sideInfo file} + (ListUtil.mapConcat decl (#1 file), #2 file) end end -- cgit v1.2.3 From 287683a7a940849ab734acd4ba7fad3c60b7b5f7 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 21 Sep 2015 14:54:07 -0400 Subject: Factor out [Monoize.Fm] to make it accessible to [Sqlcache]. --- src/mono_fm.sig | 22 ++++++++++ src/mono_fm.sml | 115 ++++++++++++++++++++++++++++++++++++++++++++++++ src/monoize.sml | 134 ++++---------------------------------------------------- src/sources | 3 ++ 4 files changed, 148 insertions(+), 126 deletions(-) create mode 100644 src/mono_fm.sig create mode 100644 src/mono_fm.sml (limited to 'src') diff --git a/src/mono_fm.sig b/src/mono_fm.sig new file mode 100644 index 00000000..a72a5da7 --- /dev/null +++ b/src/mono_fm.sig @@ -0,0 +1,22 @@ +signature MONO_FM = sig + type t + + type vr = string * int * Mono.typ * Mono.exp * string + + datatype foo_kind = + Attr + | Url + + val empty : int -> t + + val lookup : t -> foo_kind -> int -> (int -> t -> vr * t) -> t * int + val lookupList : t -> foo_kind -> Mono.typ -> (int -> t -> vr * t) -> t * int + val enter : t -> t + val decls : t -> Mono.decl list + + val freshName : t -> int * t + + (* TODO: don't expose raw references if possible. 
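The lookup and lookupList entries in this signature implement a memo table: given a kind and a key, they either return the number of a helper that was already generated or run the supplied thunk on a fresh name, threading the accumulated declarations through. A toy model of that interface with plain integer keys and string "declarations" is sketched below, using the SML/NJ library's IntBinaryMap as the compiler sources do; the real structure keys on foo_kind plus Mono types and emits a DValRec of the collected bindings.

structure Memo :> sig
    type t
    val empty : int -> t
    val lookup : t -> int -> (int -> t -> string * t) -> t * int
    val decls : t -> string list
end = struct
    structure IM = IntBinaryMap

    type t = {count : int, map : int IM.map, decls : string list}

    fun empty count = {count = count, map = IM.empty, decls = []}

    fun decls ({decls, ...} : t) = decls

    (* Return the name already generated for [key], or run [gen] on a fresh
       name to produce its declaration, remembering both for next time. *)
    fun lookup (t as {count, map, decls}) key gen =
        case IM.find (map, key) of
            SOME n => (t, n)
          | NONE =>
            let
                val n = count
                val (d, {count, map, decls}) =
                    gen n {count = count + 1,
                           map = IM.insert (map, key, n),
                           decls = decls}
            in
                ({count = count, map = map, decls = d :: decls}, n)
            end
end

Monoize threads a value of this shape through the whole pass so each generated helper exists exactly once; Sqlcache needs the same table for its urlified cache values, which is why this commit moves it into its own module.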
*) + val nextPvar : int ref + val postMonoize : t ref +end diff --git a/src/mono_fm.sml b/src/mono_fm.sml new file mode 100644 index 00000000..d7e9e001 --- /dev/null +++ b/src/mono_fm.sml @@ -0,0 +1,115 @@ +(* TODO: better name than "fm"? *) +structure MonoFm : MONO_FM = struct + +open Mono + +type vr = string * int * typ * exp * string + +datatype foo_kind = + Attr + | Url + +structure IM = IntBinaryMap + +structure M = BinaryMapFn(struct + type ord_key = foo_kind + fun compare x = + case x of + (Attr, Attr) => EQUAL + | (Attr, _) => LESS + | (_, Attr) => GREATER + + | (Url, Url) => EQUAL + end) + +structure TM = BinaryMapFn(struct + type ord_key = typ + val compare = MonoUtil.Typ.compare + end) + +type t = { + count : int, + map : int IM.map M.map, + listMap : int TM.map M.map, + decls : vr list +} + +val nextPvar = ref 0 + +fun empty count = { + count = count, + map = M.empty, + listMap = M.empty, + decls = [] +} + +fun chooseNext count = + let + val n = !nextPvar + in + if count < n then + (count, count+1) + else + (nextPvar := n + 1; + (n, n+1)) + end + +fun enter ({count, map, listMap, ...} : t) = {count = count, map = map, listMap = listMap, decls = []} +fun freshName {count, map, listMap, decls} = + let + val (next, count) = chooseNext count + in + (next, {count = count , map = map, listMap = listMap, decls = decls}) + end +fun decls ({decls, ...} : t) = + case decls of + [] => [] + | _ => [(DValRec decls, ErrorMsg.dummySpan)] + +fun lookup (t as {count, map, listMap, decls}) k n thunk = + let + val im = Option.getOpt (M.find (map, k), IM.empty) + in + case IM.find (im, n) of + NONE => + let + val n' = count + val (d, {count, map, listMap, decls}) = + thunk count {count = count + 1, + map = M.insert (map, k, IM.insert (im, n, n')), + listMap = listMap, + decls = decls} + in + ({count = count, + map = map, + listMap = listMap, + decls = d :: decls}, n') + end + | SOME n' => (t, n') + end + +fun lookupList (t as {count, map, listMap, decls}) k tp thunk = + let + val tm = Option.getOpt (M.find (listMap, k), TM.empty) + in + case TM.find (tm, tp) of + NONE => + let + val n' = count + val (d, {count, map, listMap, decls}) = + thunk count {count = count + 1, + map = map, + listMap = M.insert (listMap, k, TM.insert (tm, tp, n')), + decls = decls} + in + ({count = count, + map = map, + listMap = listMap, + decls = d :: decls}, n') + end + | SOME n' => (t, n') + end + +val postMonoize : t ref = ref (empty 0) + +end diff --git a/src/monoize.sml b/src/monoize.sml index d8c4d276..4bd3aff2 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -50,7 +50,7 @@ structure RM = BinaryMapFn(struct (L'.TRecord r2, E.dummySpan)) end) -val nextPvar = ref 0 +val nextPvar = MonoFm.nextPvar val pvars = ref (RM.empty : (int * (string * int * L'.typ) list) RM.map) val pvarDefs = ref ([] : (string * int * (string * int * L'.typ option) list) list) val pvarOldDefs = ref ([] : (int * (string * int * L.con option) list) list) @@ -374,131 +374,12 @@ fun monoType env = val dummyExp = (L'.EPrim (Prim.Int 0), E.dummySpan) -structure IM = IntBinaryMap - -datatype foo_kind = - Attr - | Url +structure Fm = MonoFm fun fk2s fk = case fk of - Attr => "attr" - | Url => "url" - -type vr = string * int * L'.typ * L'.exp * string - -structure Fm :> sig - type t - - val empty : int -> t - - val lookup : t -> foo_kind -> int -> (int -> t -> vr * t) -> t * int - val lookupList : t -> foo_kind -> L'.typ -> (int -> t -> vr * t) -> t * int - val enter : t -> t - val decls : t -> L'.decl list - - val freshName : t -> int * t -end = 
struct - -structure M = BinaryMapFn(struct - type ord_key = foo_kind - fun compare x = - case x of - (Attr, Attr) => EQUAL - | (Attr, _) => LESS - | (_, Attr) => GREATER - - | (Url, Url) => EQUAL - end) - -structure TM = BinaryMapFn(struct - type ord_key = L'.typ - val compare = MonoUtil.Typ.compare - end) - -type t = { - count : int, - map : int IM.map M.map, - listMap : int TM.map M.map, - decls : vr list -} - -fun empty count = { - count = count, - map = M.empty, - listMap = M.empty, - decls = [] -} - -fun chooseNext count = - let - val n = !nextPvar - in - if count < n then - (count, count+1) - else - (nextPvar := n + 1; - (n, n+1)) - end - -fun enter ({count, map, listMap, ...} : t) = {count = count, map = map, listMap = listMap, decls = []} -fun freshName {count, map, listMap, decls} = - let - val (next, count) = chooseNext count - in - (next, {count = count , map = map, listMap = listMap, decls = decls}) - end -fun decls ({decls, ...} : t) = - case decls of - [] => [] - | _ => [(L'.DValRec decls, ErrorMsg.dummySpan)] - -fun lookup (t as {count, map, listMap, decls}) k n thunk = - let - val im = Option.getOpt (M.find (map, k), IM.empty) - in - case IM.find (im, n) of - NONE => - let - val n' = count - val (d, {count, map, listMap, decls}) = - thunk count {count = count + 1, - map = M.insert (map, k, IM.insert (im, n, n')), - listMap = listMap, - decls = decls} - in - ({count = count, - map = map, - listMap = listMap, - decls = d :: decls}, n') - end - | SOME n' => (t, n') - end - -fun lookupList (t as {count, map, listMap, decls}) k tp thunk = - let - val tm = Option.getOpt (M.find (listMap, k), TM.empty) - in - case TM.find (tm, tp) of - NONE => - let - val n' = count - val (d, {count, map, listMap, decls}) = - thunk count {count = count + 1, - map = map, - listMap = M.insert (listMap, k, TM.insert (tm, tp, n')), - decls = decls} - in - ({count = count, - map = map, - listMap = listMap, - decls = d :: decls}, n') - end - | SOME n' => (t, n') - end - -end - + Fm.Attr => "attr" + | Fm.Url => "url" fun capitalize s = if s = "" then @@ -677,8 +558,8 @@ fun fooifyExp fk env = fooify end -val attrifyExp = fooifyExp Attr -val urlifyExp = fooifyExp Url +val attrifyExp = fooifyExp Fm.Attr +val urlifyExp = fooifyExp Fm.Url val urlifiedUnit = let @@ -4738,7 +4619,7 @@ fun monoize env file = val mname = CoreUtil.File.maxName file + 1 val () = nextPvar := mname - val (_, _, ds) = List.foldl (fn (d, (env, fm, ds)) => + val (_, fm, ds) = List.foldl (fn (d, (env, fm, ds)) => case #1 d of L.DDatabase s => let @@ -4786,6 +4667,7 @@ fun monoize env file = pvars := RM.empty; pvarDefs := []; pvarOldDefs := []; + Fm.postMonoize := fm; (rev ds, []) end diff --git a/src/sources b/src/sources index f0914bdf..e6a361ce 100644 --- a/src/sources +++ b/src/sources @@ -168,6 +168,9 @@ $(SRC)/mono_env.sml $(SRC)/mono_print.sig $(SRC)/mono_print.sml +$(SRC)/mono_fm.sig +$(SRC)/mono_fm.sml + $(SRC)/sql.sig $(SRC)/sql.sml -- cgit v1.2.3 From 59c69b0cebc215599acc25906bd0366af03abf0c Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 21 Sep 2015 16:07:35 -0400 Subject: Factor out urlification. 
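Urlification here means rendering a typed value as a string that can travel in a URL or serve as a cache key, which is why Sqlcache wants access to the generator that Monoize previously kept private. As a standalone illustration of the flavor of such an encoding, the sketch below walks a small value type; the datatype, the dot-plus-hex escaping, and the None/Some rendering are assumptions made for this sketch, not Ur/Web's actual wire format, and the real fooifyExp generates Mono code that performs the encoding at runtime rather than encoding values directly.

datatype value =
    Int of int
  | String of string
  | Pair of value * value
  | Opt of value option

(* Keep alphanumerics; render anything else as '.' followed by two hex digits. *)
fun escape s =
    String.translate
        (fn c => if Char.isAlphaNum c then str c
                 else "." ^ StringCvt.padLeft #"0" 2 (Int.fmt StringCvt.HEX (Char.ord c)))
        s

fun urlify v =
    case v of
        Int n => Int.toString n
      | String s => escape s
      | Pair (a, b) => urlify a ^ "/" ^ urlify b
      | Opt NONE => "None"
      | Opt (SOME v') => "Some/" ^ urlify v'

(* urlify (Pair (Int 3, String "a b")) = "3/a.20b" *)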
--- src/mono_fm.sig | 22 ---- src/mono_fm.sml | 115 ------------------- src/mono_fooify.sig | 38 +++++++ src/mono_fooify.sml | 317 ++++++++++++++++++++++++++++++++++++++++++++++++++++ src/monoize.sml | 206 ++++------------------------------ src/sources | 4 +- 6 files changed, 377 insertions(+), 325 deletions(-) delete mode 100644 src/mono_fm.sig delete mode 100644 src/mono_fm.sml create mode 100644 src/mono_fooify.sig create mode 100644 src/mono_fooify.sml (limited to 'src') diff --git a/src/mono_fm.sig b/src/mono_fm.sig deleted file mode 100644 index a72a5da7..00000000 --- a/src/mono_fm.sig +++ /dev/null @@ -1,22 +0,0 @@ -signature MONO_FM = sig - type t - - type vr = string * int * Mono.typ * Mono.exp * string - - datatype foo_kind = - Attr - | Url - - val empty : int -> t - - val lookup : t -> foo_kind -> int -> (int -> t -> vr * t) -> t * int - val lookupList : t -> foo_kind -> Mono.typ -> (int -> t -> vr * t) -> t * int - val enter : t -> t - val decls : t -> Mono.decl list - - val freshName : t -> int * t - - (* TODO: don't expose raw references if possible. *) - val nextPvar : int ref - val postMonoize : t ref -end diff --git a/src/mono_fm.sml b/src/mono_fm.sml deleted file mode 100644 index d7e9e001..00000000 --- a/src/mono_fm.sml +++ /dev/null @@ -1,115 +0,0 @@ -(* TODO: better name than "fm"? *) -structure MonoFm : MONO_FM = struct - -open Mono - -type vr = string * int * typ * exp * string - -datatype foo_kind = - Attr - | Url - -structure IM = IntBinaryMap - -structure M = BinaryMapFn(struct - type ord_key = foo_kind - fun compare x = - case x of - (Attr, Attr) => EQUAL - | (Attr, _) => LESS - | (_, Attr) => GREATER - - | (Url, Url) => EQUAL - end) - -structure TM = BinaryMapFn(struct - type ord_key = typ - val compare = MonoUtil.Typ.compare - end) - -type t = { - count : int, - map : int IM.map M.map, - listMap : int TM.map M.map, - decls : vr list -} - -val nextPvar = ref 0 - -fun empty count = { - count = count, - map = M.empty, - listMap = M.empty, - decls = [] -} - -fun chooseNext count = - let - val n = !nextPvar - in - if count < n then - (count, count+1) - else - (nextPvar := n + 1; - (n, n+1)) - end - -fun enter ({count, map, listMap, ...} : t) = {count = count, map = map, listMap = listMap, decls = []} -fun freshName {count, map, listMap, decls} = - let - val (next, count) = chooseNext count - in - (next, {count = count , map = map, listMap = listMap, decls = decls}) - end -fun decls ({decls, ...} : t) = - case decls of - [] => [] - | _ => [(DValRec decls, ErrorMsg.dummySpan)] - -fun lookup (t as {count, map, listMap, decls}) k n thunk = - let - val im = Option.getOpt (M.find (map, k), IM.empty) - in - case IM.find (im, n) of - NONE => - let - val n' = count - val (d, {count, map, listMap, decls}) = - thunk count {count = count + 1, - map = M.insert (map, k, IM.insert (im, n, n')), - listMap = listMap, - decls = decls} - in - ({count = count, - map = map, - listMap = listMap, - decls = d :: decls}, n') - end - | SOME n' => (t, n') - end - -fun lookupList (t as {count, map, listMap, decls}) k tp thunk = - let - val tm = Option.getOpt (M.find (listMap, k), TM.empty) - in - case TM.find (tm, tp) of - NONE => - let - val n' = count - val (d, {count, map, listMap, decls}) = - thunk count {count = count + 1, - map = map, - listMap = M.insert (listMap, k, TM.insert (tm, tp, n')), - decls = decls} - in - ({count = count, - map = map, - listMap = listMap, - decls = d :: decls}, n') - end - | SOME n' => (t, n') - end - -val postMonoize : t ref = ref (empty 0) - -end diff --git 
a/src/mono_fooify.sig b/src/mono_fooify.sig new file mode 100644 index 00000000..9eb8038b --- /dev/null +++ b/src/mono_fooify.sig @@ -0,0 +1,38 @@ +signature MONO_FOOIFY = sig + +(* TODO: don't expose raw references if possible. *) +val nextPvar : int ref +val pvarDefs : ((string * int * (string * int * Mono.typ option) list) list) ref + +datatype foo_kind = Attr | Url + +structure Fm : sig + type t + + type vr = string * int * Mono.typ * Mono.exp * string + + val empty : int -> t + + val lookup : t -> foo_kind -> int -> (int -> t -> vr * t) -> t * int + val lookupList : t -> foo_kind -> Mono.typ -> (int -> t -> vr * t) -> t * int + val enter : t -> t + val decls : t -> Mono.decl list + + val freshName : t -> int * t + + (* Set at the end of [Monoize]. *) + val canonical : t ref +end + +(* General form used in [Monoize]. *) +val fooifyExp : foo_kind + -> (int -> Mono.typ * string) + -> (int -> string * (string * int * Mono.typ option) list) + -> Fm.t + -> Mono.exp * Mono.typ + -> Mono.exp * Fm.t + +(* Easy-to-use special case used in [Sqlcache]. *) +val urlify : MonoEnv.env -> Mono.exp * Mono.typ -> Mono.exp + +end diff --git a/src/mono_fooify.sml b/src/mono_fooify.sml new file mode 100644 index 00000000..d7cb9f59 --- /dev/null +++ b/src/mono_fooify.sml @@ -0,0 +1,317 @@ +structure MonoFooify :> MONO_FOOIFY = struct + +open Mono + +datatype foo_kind = + Attr + | Url + +val nextPvar = ref 0 +val pvarDefs = ref ([] : (string * int * (string * int * typ option) list) list) + +structure Fm = struct + +type vr = string * int * typ * exp * string + +structure IM = IntBinaryMap + +structure M = BinaryMapFn(struct + type ord_key = foo_kind + fun compare x = + case x of + (Attr, Attr) => EQUAL + | (Attr, _) => LESS + | (_, Attr) => GREATER + + | (Url, Url) => EQUAL + end) + +structure TM = BinaryMapFn(struct + type ord_key = typ + val compare = MonoUtil.Typ.compare + end) + +type t = { + count : int, + map : int IM.map M.map, + listMap : int TM.map M.map, + decls : vr list +} + +fun empty count = { + count = count, + map = M.empty, + listMap = M.empty, + decls = [] +} + +fun chooseNext count = + let + val n = !nextPvar + in + if count < n then + (count, count+1) + else + (nextPvar := n + 1; + (n, n+1)) + end + +fun enter ({count, map, listMap, ...} : t) = {count = count, map = map, listMap = listMap, decls = []} +fun freshName {count, map, listMap, decls} = + let + val (next, count) = chooseNext count + in + (next, {count = count , map = map, listMap = listMap, decls = decls}) + end +fun decls ({decls, ...} : t) = + case decls of + [] => [] + | _ => [(DValRec decls, ErrorMsg.dummySpan)] + +fun lookup (t as {count, map, listMap, decls}) k n thunk = + let + val im = Option.getOpt (M.find (map, k), IM.empty) + in + case IM.find (im, n) of + NONE => + let + val n' = count + val (d, {count, map, listMap, decls}) = + thunk count {count = count + 1, + map = M.insert (map, k, IM.insert (im, n, n')), + listMap = listMap, + decls = decls} + in + ({count = count, + map = map, + listMap = listMap, + decls = d :: decls}, n') + end + | SOME n' => (t, n') + end + +fun lookupList (t as {count, map, listMap, decls}) k tp thunk = + let + val tm = Option.getOpt (M.find (listMap, k), TM.empty) + in + case TM.find (tm, tp) of + NONE => + let + val n' = count + val (d, {count, map, listMap, decls}) = + thunk count {count = count + 1, + map = map, + listMap = M.insert (listMap, k, TM.insert (tm, tp, n')), + decls = decls} + in + ({count = count, + map = map, + listMap = listMap, + decls = d :: decls}, n') + end + | SOME 
n' => (t, n') + end + +(* Has to be set at the end of [Monoize]. *) +val canonical = ref (empty 0 : t) + +end + +fun fk2s fk = + case fk of + Attr => "attr" + | Url => "url" + +fun capitalize s = + if s = "" then + s + else + str (Char.toUpper (String.sub (s, 0))) ^ String.extract (s, 1, NONE) + +structure E = ErrorMsg + +val dummyExp = (EPrim (Prim.Int 0), E.dummySpan) + +fun fooifyExp fk lookupENamed lookupDatatype = + let + fun fooify fm (e, tAll as (t, loc)) = + case #1 e of + EClosure (fnam, [(ERecord [], _)]) => + let + val (_, s) = lookupENamed fnam + in + ((EPrim (Prim.String (Prim.Normal, Settings.getUrlPrefix () ^ s)), loc), fm) + end + | EClosure (fnam, args) => + let + val (ft, s) = lookupENamed fnam + fun attrify (args, ft, e, fm) = + case (args, ft) of + ([], _) => (e, fm) + | (arg :: args, (TFun (t, ft), _)) => + let + val (arg', fm) = fooify fm (arg, t) + in + attrify (args, ft, + (EStrcat (e, + (EStrcat ((EPrim (Prim.String (Prim.Normal, "/")), loc), + arg'), loc)), loc), + fm) + end + | _ => (E.errorAt loc "Type mismatch encoding attribute"; + (e, fm)) + in + attrify (args, ft, (EPrim (Prim.String (Prim.Normal, Settings.getUrlPrefix () ^ s)), loc), fm) + end + | _ => + case t of + TFfi ("Basis", "unit") => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) + | TFfi (m, x) => ((EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm) + + | TRecord [] => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) + | TRecord ((x, t) :: xts) => + let + val (se, fm) = fooify fm ((EField (e, x), loc), t) + in + foldl (fn ((x, t), (se, fm)) => + let + val (se', fm) = fooify fm ((EField (e, x), loc), t) + in + ((EStrcat (se, + (EStrcat ((EPrim (Prim.String (Prim.Normal, "/")), loc), + se'), loc)), loc), + fm) + end) (se, fm) xts + end + + | TDatatype (i, ref (dk, _)) => + let + fun makeDecl n fm = + let + val (x, xncs) = + case ListUtil.search (fn (x, i', xncs) => + if i' = i then + SOME (x, xncs) + else + NONE) (!pvarDefs) of + NONE => lookupDatatype i + | SOME v => v + + val (branches, fm) = + ListUtil.foldlMap + (fn ((x, n, to), fm) => + case to of + NONE => + (((PCon (dk, PConVar n, NONE), loc), + (EPrim (Prim.String (Prim.Normal, x)), loc)), + fm) + | SOME t => + let + val (arg, fm) = fooify fm ((ERel 0, loc), t) + in + (((PCon (dk, PConVar n, SOME (PVar ("a", t), loc)), loc), + (EStrcat ((EPrim (Prim.String (Prim.Normal, x ^ "/")), loc), + arg), loc)), + fm) + end) + fm xncs + + val dom = tAll + val ran = (TFfi ("Basis", "string"), loc) + in + ((fk2s fk ^ "ify_" ^ x, + n, + (TFun (dom, ran), loc), + (EAbs ("x", + dom, + ran, + (ECase ((ERel 0, loc), + branches, + {disc = dom, + result = ran}), loc)), loc), + ""), + fm) + end + + val (fm, n) = Fm.lookup fm fk i makeDecl + in + ((EApp ((ENamed n, loc), e), loc), fm) + end + + | TOption t => + let + val (body, fm) = fooify fm ((ERel 0, loc), t) + in + ((ECase (e, + [((PNone t, loc), + (EPrim (Prim.String (Prim.Normal, "None")), loc)), + + ((PSome (t, (PVar ("x", t), loc)), loc), + (EStrcat ((EPrim (Prim.String (Prim.Normal, "Some/")), loc), + body), loc))], + {disc = tAll, + result = (TFfi ("Basis", "string"), loc)}), loc), + fm) + end + + | TList t => + let + fun makeDecl n fm = + let + val rt = (TRecord [("1", t), ("2", (TList t, loc))], loc) + val (arg, fm) = fooify fm ((ERel 0, loc), rt) + + val branches = [((PNone rt, loc), + (EPrim (Prim.String (Prim.Normal, "Nil")), loc)), + ((PSome (rt, (PVar ("a", rt), loc)), loc), + (EStrcat ((EPrim (Prim.String (Prim.Normal, "Cons/")), loc), + arg), loc))] + + val dom = tAll + 
val ran = (TFfi ("Basis", "string"), loc) + in + ((fk2s fk ^ "ify_list", + n, + (TFun (dom, ran), loc), + (EAbs ("x", + dom, + ran, + (ECase ((ERel 0, loc), + branches, + {disc = dom, + result = ran}), loc)), loc), + ""), + fm) + end + + val (fm, n) = Fm.lookupList fm fk t makeDecl + in + ((EApp ((ENamed n, loc), e), loc), fm) + end + + | _ => (E.errorAt loc "Don't know how to encode attribute/URL type"; + Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty tAll)]; + (dummyExp, fm)) + in + fooify + end + +fun urlify env expTyp = + let + val (exp, fm) = + fooifyExp + Url + (fn n => + let + val (_, t, _, s) = MonoEnv.lookupENamed env n + in + (t, s) + end) + (fn n => MonoEnv.lookupDatatype env n) + (!Fm.canonical) + expTyp + in + Fm.canonical := fm; + exp + end +end diff --git a/src/monoize.sml b/src/monoize.sml index 4bd3aff2..f92d7511 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -50,9 +50,9 @@ structure RM = BinaryMapFn(struct (L'.TRecord r2, E.dummySpan)) end) -val nextPvar = MonoFm.nextPvar +val nextPvar = MonoFooify.nextPvar val pvars = ref (RM.empty : (int * (string * int * L'.typ) list) RM.map) -val pvarDefs = ref ([] : (string * int * (string * int * L'.typ option) list) list) +val pvarDefs = MonoFooify.pvarDefs val pvarOldDefs = ref ([] : (int * (string * int * L.con option) list) list) fun choosePvar () = @@ -374,192 +374,26 @@ fun monoType env = val dummyExp = (L'.EPrim (Prim.Int 0), E.dummySpan) -structure Fm = MonoFm - -fun fk2s fk = - case fk of - Fm.Attr => "attr" - | Fm.Url => "url" - -fun capitalize s = - if s = "" then - s - else - str (Char.toUpper (String.sub (s, 0))) ^ String.extract (s, 1, NONE) +structure Fm = MonoFooify.Fm fun fooifyExp fk env = - let - fun fooify fm (e, tAll as (t, loc)) = - case #1 e of - L'.EClosure (fnam, [(L'.ERecord [], _)]) => - let - val (_, _, _, s) = Env.lookupENamed env fnam - in - ((L'.EPrim (Prim.String (Prim.Normal, Settings.getUrlPrefix () ^ s)), loc), fm) - end - | L'.EClosure (fnam, args) => - let - val (_, ft, _, s) = Env.lookupENamed env fnam - val ft = monoType env ft - - fun attrify (args, ft, e, fm) = - case (args, ft) of - ([], _) => (e, fm) - | (arg :: args, (L'.TFun (t, ft), _)) => - let - val (arg', fm) = fooify fm (arg, t) - in - attrify (args, ft, - (L'.EStrcat (e, - (L'.EStrcat ((L'.EPrim (Prim.String (Prim.Normal, "/")), loc), - arg'), loc)), loc), - fm) - end - | _ => (E.errorAt loc "Type mismatch encoding attribute"; - (e, fm)) - in - attrify (args, ft, (L'.EPrim (Prim.String (Prim.Normal, Settings.getUrlPrefix () ^ s)), loc), fm) - end - | _ => - case t of - L'.TFfi ("Basis", "unit") => ((L'.EPrim (Prim.String (Prim.Normal, "")), loc), fm) - | L'.TFfi (m, x) => ((L'.EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm) - - | L'.TRecord [] => ((L'.EPrim (Prim.String (Prim.Normal, "")), loc), fm) - | L'.TRecord ((x, t) :: xts) => - let - val (se, fm) = fooify fm ((L'.EField (e, x), loc), t) - in - foldl (fn ((x, t), (se, fm)) => - let - val (se', fm) = fooify fm ((L'.EField (e, x), loc), t) - in - ((L'.EStrcat (se, - (L'.EStrcat ((L'.EPrim (Prim.String (Prim.Normal, "/")), loc), - se'), loc)), loc), - fm) - end) (se, fm) xts - end - - | L'.TDatatype (i, ref (dk, _)) => - let - fun makeDecl n fm = - let - val (x, xncs) = - case ListUtil.search (fn (x, i', xncs) => - if i' = i then - SOME (x, xncs) - else - NONE) (!pvarDefs) of - NONE => - let - val (x, _, xncs) = Env.lookupDatatype env i - in - (x, map (fn (x, n, c) => (x, n, Option.map (monoType env) c)) xncs) - end - | SOME v => v - - val 
(branches, fm) = - ListUtil.foldlMap - (fn ((x, n, to), fm) => - case to of - NONE => - (((L'.PCon (dk, L'.PConVar n, NONE), loc), - (L'.EPrim (Prim.String (Prim.Normal, x)), loc)), - fm) - | SOME t => - let - val (arg, fm) = fooify fm ((L'.ERel 0, loc), t) - in - (((L'.PCon (dk, L'.PConVar n, SOME (L'.PVar ("a", t), loc)), loc), - (L'.EStrcat ((L'.EPrim (Prim.String (Prim.Normal, x ^ "/")), loc), - arg), loc)), - fm) - end) - fm xncs - - val dom = tAll - val ran = (L'.TFfi ("Basis", "string"), loc) - in - ((fk2s fk ^ "ify_" ^ x, - n, - (L'.TFun (dom, ran), loc), - (L'.EAbs ("x", - dom, - ran, - (L'.ECase ((L'.ERel 0, loc), - branches, - {disc = dom, - result = ran}), loc)), loc), - ""), - fm) - end - - val (fm, n) = Fm.lookup fm fk i makeDecl - in - ((L'.EApp ((L'.ENamed n, loc), e), loc), fm) - end - - | L'.TOption t => - let - val (body, fm) = fooify fm ((L'.ERel 0, loc), t) - in - ((L'.ECase (e, - [((L'.PNone t, loc), - (L'.EPrim (Prim.String (Prim.Normal, "None")), loc)), - - ((L'.PSome (t, (L'.PVar ("x", t), loc)), loc), - (L'.EStrcat ((L'.EPrim (Prim.String (Prim.Normal, "Some/")), loc), - body), loc))], - {disc = tAll, - result = (L'.TFfi ("Basis", "string"), loc)}), loc), - fm) - end - - | L'.TList t => - let - fun makeDecl n fm = - let - val rt = (L'.TRecord [("1", t), ("2", (L'.TList t, loc))], loc) - val (arg, fm) = fooify fm ((L'.ERel 0, loc), rt) - - val branches = [((L'.PNone rt, loc), - (L'.EPrim (Prim.String (Prim.Normal, "Nil")), loc)), - ((L'.PSome (rt, (L'.PVar ("a", rt), loc)), loc), - (L'.EStrcat ((L'.EPrim (Prim.String (Prim.Normal, "Cons/")), loc), - arg), loc))] - - val dom = tAll - val ran = (L'.TFfi ("Basis", "string"), loc) - in - ((fk2s fk ^ "ify_list", - n, - (L'.TFun (dom, ran), loc), - (L'.EAbs ("x", - dom, - ran, - (L'.ECase ((L'.ERel 0, loc), - branches, - {disc = dom, - result = ran}), loc)), loc), - ""), - fm) - end - - val (fm, n) = Fm.lookupList fm fk t makeDecl - in - ((L'.EApp ((L'.ENamed n, loc), e), loc), fm) - end - - | _ => (E.errorAt loc "Don't know how to encode attribute/URL type"; - Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty tAll)]; - (dummyExp, fm)) - in - fooify - end + MonoFooify.fooifyExp + fk + (fn n => + let + val (_, t, _, s) = Env.lookupENamed env n + in + (monoType env t, s) + end) + (fn n => + let + val (x, _, xncs) = Env.lookupDatatype env n + in + (x, map (fn (x, n, c) => (x, n, Option.map (monoType env) c)) xncs) + end) -val attrifyExp = fooifyExp Fm.Attr -val urlifyExp = fooifyExp Fm.Url +val attrifyExp = fooifyExp MonoFooify.Attr +val urlifyExp = fooifyExp MonoFooify.Url val urlifiedUnit = let @@ -4667,7 +4501,7 @@ fun monoize env file = pvars := RM.empty; pvarDefs := []; pvarOldDefs := []; - Fm.postMonoize := fm; + Fm.canonical := fm; (rev ds, []) end diff --git a/src/sources b/src/sources index e6a361ce..1303b46e 100644 --- a/src/sources +++ b/src/sources @@ -168,8 +168,8 @@ $(SRC)/mono_env.sml $(SRC)/mono_print.sig $(SRC)/mono_print.sml -$(SRC)/mono_fm.sig -$(SRC)/mono_fm.sml +$(SRC)/mono_fooify.sig +$(SRC)/mono_fooify.sml $(SRC)/sql.sig $(SRC)/sql.sml -- cgit v1.2.3 From 97115c5f804824c024a0c08c288889d29f743e64 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 21 Sep 2015 16:45:59 -0400 Subject: Use new refactored urlification in Sqlcache. 
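
Now that MonoFooify.urlify exists as a standalone function, EQuery no longer needs to carry a precomputed sqlcacheInfo expression through the whole Mono pipeline: the field is dropped from Mono and from every pass that had to thread it along (cjrize, iflow, jscomp, mono_opt, mono_print, mono_util), and Monoize.urlifiedUnit disappears with it. Sqlcache urlifies the cached result itself at the point where it wraps a query; roughly, the relevant change in cacheWrap is

    (* Before: a urlified [ERel 0] was built in Monoize and carried inside EQuery. *)
    val store = (store (i, argsInc, urlifiedRel0), dummyLoc)

    (* After: urlify the cached row directly, in the environment at hand. *)
    val store = (store (i, argsInc, MonoFooify.urlify env (rel0, resultTyp)), dummyLoc)
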
--- src/cjrize.sml | 2 +- src/iflow.sml | 10 ++++------ src/jscomp.sml | 5 ++--- src/mono.sml | 3 +-- src/mono_opt.sml | 11 ++++------- src/mono_print.sml | 2 +- src/mono_util.sml | 22 +++++++++------------- src/monoize.sig | 2 -- src/monoize.sml | 14 +------------- src/sqlcache.sml | 11 ++++------- 10 files changed, 27 insertions(+), 55 deletions(-) (limited to 'src') diff --git a/src/cjrize.sml b/src/cjrize.sml index b20d6d22..5f6ae4d8 100644 --- a/src/cjrize.sml +++ b/src/cjrize.sml @@ -431,7 +431,7 @@ fun cifyExp (eAll as (e, loc), sm) = | L.EClosure _ => (ErrorMsg.errorAt loc "Nested closure remains in code generation"; (dummye, sm)) - | L.EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => + | L.EQuery {exps, tables, state, query, body, initial} => let val (exps', sm) = ListUtil.foldlMap (fn ((x, t), sm) => let diff --git a/src/iflow.sml b/src/iflow.sml index b8346baa..f68d8f72 100644 --- a/src/iflow.sml +++ b/src/iflow.sml @@ -1870,15 +1870,14 @@ val namer = MonoUtil.File.map {typ = fn t => t, case e of EDml (e, fm) => nameSubexps (fn (_, e') => (EDml (e', fm), #2 e)) e - | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => + | EQuery {exps, tables, state, query, body, initial} => nameSubexps (fn (liftBy, e') => (EQuery {exps = exps, tables = tables, state = state, query = e', body = mliftExpInExp liftBy 2 body, - initial = mliftExpInExp liftBy 0 initial, - sqlcacheInfo = sqlcacheInfo}, + initial = mliftExpInExp liftBy 0 initial}, #2 query)) query | _ => e, decl = fn d => d} @@ -2071,12 +2070,11 @@ fun check (file : file) = | ESeq (e1, e2) => (ESeq (doExp env e1, doExp env e2), loc) | ELet (x, t, e1, e2) => (ELet (x, t, doExp env e1, doExp (Unknown :: env) e2), loc) | EClosure (n, es) => (EClosure (n, map (doExp env) es), loc) - | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => + | EQuery {exps, tables, state, query, body, initial} => (EQuery {exps = exps, tables = tables, state = state, query = doExp env query, body = doExp (Unknown :: Unknown :: env) body, - initial = doExp env initial, - sqlcacheInfo = sqlcacheInfo}, loc) + initial = doExp env initial}, loc) | EDml (e1, mode) => (case parse dml e1 of NONE => () diff --git a/src/jscomp.sml b/src/jscomp.sml index e5f7d234..4c6bf0a9 100644 --- a/src/jscomp.sml +++ b/src/jscomp.sml @@ -1178,7 +1178,7 @@ fun process (file : file) = ((EClosure (n, es), loc), st) end - | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => + | EQuery {exps, tables, state, query, body, initial} => let val row = exps @ map (fn (x, xts) => (x, (TRecord xts, loc))) tables val row = ListMergeSort.sort (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER) row @@ -1189,8 +1189,7 @@ fun process (file : file) = val (initial, st) = exp outer (initial, st) in ((EQuery {exps = exps, tables = tables, state = state, - query = query, body = body, initial = initial, - sqlcacheInfo = sqlcacheInfo}, loc), st) + query = query, body = body, initial = initial}, loc), st) end | EDml (e, mode) => let diff --git a/src/mono.sml b/src/mono.sml index 5185e48c..b05c3dcc 100644 --- a/src/mono.sml +++ b/src/mono.sml @@ -107,8 +107,7 @@ datatype exp' = state : typ, query : exp, (* exp of string type containing sql query *) body : exp, - initial : exp, - sqlcacheInfo : exp } + initial : exp } | EDml of exp * failure_mode | ENextval of exp | ESetval of exp * exp diff --git a/src/mono_opt.sml b/src/mono_opt.sml index f4cd6895..186f6c62 100644 --- a/src/mono_opt.sml +++ b/src/mono_opt.sml @@ -405,20 +405,18 @@ fun exp 
e = initial = (EPrim (Prim.String (k, "")), _), body = (EStrcat ((EPrim (Prim.String (_, s)), _), (EStrcat ((ERel 0, _), - e'), _)), _), - sqlcacheInfo}, loc) => + e'), _)), _)}, loc) => if (case k of Prim.Normal => s = "" | Prim.Html => CharVector.all Char.isSpace s) then EQuery {exps = exps, tables = tables, query = query, state = (TRecord [], loc), initial = (ERecord [], loc), - body = (optExp (EWrite e', loc), loc), - sqlcacheInfo = Monoize.urlifiedUnit} + body = (optExp (EWrite e', loc), loc)} else e | EWrite (EQuery {exps, tables, state, query, initial = (EPrim (Prim.String (_, "")), _), - body, sqlcacheInfo}, loc) => + body}, loc) => let fun passLets (depth, (e', _), lets) = case e' of @@ -433,8 +431,7 @@ fun exp e = EQuery {exps = exps, tables = tables, query = query, state = (TRecord [], loc), initial = (ERecord [], loc), - body = body, - sqlcacheInfo = Monoize.urlifiedUnit} + body = body} end else e diff --git a/src/mono_print.sml b/src/mono_print.sml index 0ff51f37..3e498d2c 100644 --- a/src/mono_print.sml +++ b/src/mono_print.sml @@ -310,7 +310,7 @@ fun p_exp' par env (e, _) = p_exp env e]) es, string ")"] - | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => + | EQuery {exps, tables, state, query, body, initial} => box [string "query[", p_list (fn (x, t) => box [string x, space, string ":", space, p_typ env t]) exps, string "] [", diff --git a/src/mono_util.sml b/src/mono_util.sml index ba10ad32..5d7eb164 100644 --- a/src/mono_util.sml +++ b/src/mono_util.sml @@ -314,7 +314,7 @@ fun mapfoldB {typ = fc, exp = fe, bind} = fn es' => (EClosure (n, es'), loc)) - | EQuery {exps, tables, state, query, body, initial, sqlcacheInfo} => + | EQuery {exps, tables, state, query, body, initial} => S.bind2 (ListUtil.mapfold (fn (x, t) => S.map2 (mft t, fn t' => (x, t'))) exps, @@ -335,19 +335,15 @@ fun mapfoldB {typ = fc, exp = fe, bind} = body, fn body' => (* ASK: is this the right thing to do? *) - S.bind2 (mfe ctx initial, + S.map2 (mfe ctx initial, fn initial' => - S.map2 (mfe (bind (ctx, RelE ("queryResult", dummyt))) - sqlcacheInfo, - fn sqlcacheInfo' => - (EQuery {exps = exps', - tables = tables', - state = state', - query = query', - body = body', - initial = initial', - sqlcacheInfo = sqlcacheInfo}, - loc)))))))) + (EQuery {exps = exps', + tables = tables', + state = state', + query = query', + body = body', + initial = initial'}, + loc))))))) | EDml (e, fm) => S.map2 (mfe ctx e, diff --git a/src/monoize.sig b/src/monoize.sig index 549bf6ee..951db01b 100644 --- a/src/monoize.sig +++ b/src/monoize.sig @@ -31,6 +31,4 @@ signature MONOIZE = sig val liftExpInExp : int -> Mono.exp -> Mono.exp - val urlifiedUnit : Mono.exp - end diff --git a/src/monoize.sml b/src/monoize.sml index f92d7511..8f6b298d 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -395,16 +395,6 @@ fun fooifyExp fk env = val attrifyExp = fooifyExp MonoFooify.Attr val urlifyExp = fooifyExp MonoFooify.Url -val urlifiedUnit = - let - val loc = ErrorMsg.dummySpan - (* Urlifies [ERel 0] to match the [sqlcacheInfo] field of [EQuery]s. 
*) - val (urlified, _) = urlifyExp CoreEnv.empty (Fm.empty 0) - ((L'.ERel 0, loc), (L'.TRecord [], loc)) - in - urlified - end - datatype 'a failable_search = Found of 'a | NotFound @@ -1687,14 +1677,12 @@ fun monoExp (env, st, fm) (all as (e, loc)) = (L'.ERel 1, loc)), loc), (L'.ERel 0, loc)), loc), (L'.ERecord [], loc)), loc) - val (urlifiedRel0, fm) = urlifyExp env fm ((L'.ERel 0, loc), state) val body = (L'.EQuery {exps = exps, tables = tables, state = state, query = (L'.ERel 3, loc), body = body', - initial = (L'.ERel 1, loc), - sqlcacheInfo = urlifiedRel0}, + initial = (L'.ERel 1, loc)}, loc) in ((L'.EAbs ("q", s, (L'.TFun (ft, (L'.TFun (state, (L'.TFun (un, state), loc)), loc)), loc), diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 8efe999c..6b4216ea 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -493,16 +493,16 @@ fun incRels inc = bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} 0 -fun cacheWrap (query, i, urlifiedRel0, resultTyp, args) = +fun cacheWrap (env, query, i, resultTyp, args) = let val () = ffiInfo := {index = i, params = length args} :: !ffiInfo val loc = dummyLoc + val rel0 = (ERel 0, loc) (* We ensure before this step that all arguments aren't effectful. by turning them into local variables as needed. *) val argsInc = map (incRels 1) args val check = (check (i, args), dummyLoc) - val store = (store (i, argsInc, urlifiedRel0), dummyLoc) - val rel0 = (ERel 0, loc) + val store = (store (i, argsInc, MonoFooify.urlify env (rel0, resultTyp)), dummyLoc) in ECase (check, [((PNone stringTyp, loc), @@ -563,8 +563,6 @@ fun addChecking file = let fun doExp env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = fn e' as EQuery {query = origQueryText, - (* ASK: could this get messed up by inlining? *) - sqlcacheInfo = urlifiedRel0, state = resultTyp, initial, body, tables, exps} => let @@ -572,7 +570,6 @@ fun addChecking file = (* Increment once for each new variable just made. *) val queryExp = incRels numArgs (EQuery {query = newQueryText, - sqlcacheInfo = urlifiedRel0, state = resultTyp, initial = initial, body = body, @@ -599,7 +596,7 @@ fun addChecking file = (* Ziv misses Haskell's do notation.... *) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( bind (Sql.parse Sql.query queryText) (fn queryParsed => - SOME (wrapLets (cacheWrap (queryExp, index, urlifiedRel0, resultTyp, args)), + SOME (wrapLets (cacheWrap (env, queryExp, index, resultTyp, args)), (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) tableToIndices (tablesQuery queryParsed), -- cgit v1.2.3 From f8d7c70d8f52003e14a66144a48bb4f06a1c185f Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 27 Sep 2015 03:52:14 -0400 Subject: Pure caching sort of works. --- src/mono_env.sig | 4 +- src/mono_env.sml | 4 +- src/mono_fooify.sig | 9 ++- src/mono_fooify.sml | 56 ++++++++++++------ src/monoize.sml | 7 ++- src/sqlcache.sml | 162 +++++++++++++++++++++++++++++++++++++--------------- 6 files changed, 166 insertions(+), 76 deletions(-) (limited to 'src') diff --git a/src/mono_env.sig b/src/mono_env.sig index 97d7d9ea..9805c0d1 100644 --- a/src/mono_env.sig +++ b/src/mono_env.sig @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -42,6 +42,8 @@ signature MONO_ENV = sig val pushERel : env -> string -> Mono.typ -> Mono.exp option -> env val lookupERel : env -> int -> string * Mono.typ * Mono.exp option + val typeContext : env -> Mono.typ list + val pushENamed : env -> string -> int -> Mono.typ -> Mono.exp option -> string -> env val lookupENamed : env -> int -> string * Mono.typ * Mono.exp option * string diff --git a/src/mono_env.sml b/src/mono_env.sml index 7f9a6e62..8617425e 100644 --- a/src/mono_env.sml +++ b/src/mono_env.sml @@ -16,7 +16,7 @@ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN @@ -108,6 +108,8 @@ fun lookupERel (env : env) n = (List.nth (#relE env, n)) handle Subscript => raise UnboundRel n +fun typeContext (env : env) = map #2 (#relE env) + fun pushENamed (env : env) x n t eo s = {datatypes = #datatypes env, constructors = #constructors env, diff --git a/src/mono_fooify.sig b/src/mono_fooify.sig index 9eb8038b..ef8f09c2 100644 --- a/src/mono_fooify.sig +++ b/src/mono_fooify.sig @@ -19,9 +19,6 @@ structure Fm : sig val decls : t -> Mono.decl list val freshName : t -> int * t - - (* Set at the end of [Monoize]. *) - val canonical : t ref end (* General form used in [Monoize]. *) @@ -32,7 +29,9 @@ val fooifyExp : foo_kind -> Mono.exp * Mono.typ -> Mono.exp * Fm.t -(* Easy-to-use special case used in [Sqlcache]. *) -val urlify : MonoEnv.env -> Mono.exp * Mono.typ -> Mono.exp +(* Easy-to-use interface in [Sqlcache]. Uses [Fm.canonical]. *) +val canonicalFm : Fm.t ref (* Set at the end of [Monoize]. *) +val urlify : MonoEnv.env -> Mono.exp * Mono.typ -> Mono.exp option +val getNewFmDecls : unit -> Mono.decl list end diff --git a/src/mono_fooify.sml b/src/mono_fooify.sml index d7cb9f59..2e32b248 100644 --- a/src/mono_fooify.sml +++ b/src/mono_fooify.sml @@ -1,4 +1,4 @@ -structure MonoFooify :> MONO_FOOIFY = struct +structure MonoFooify (* :> MONO_FOOIFY *) = struct open Mono @@ -112,9 +112,6 @@ fun lookupList (t as {count, map, listMap, decls}) k tp thunk = | SOME n' => (t, n') end -(* Has to be set at the end of [Monoize]. *) -val canonical = ref (empty 0 : t) - end fun fk2s fk = @@ -166,7 +163,12 @@ fun fooifyExp fk lookupENamed lookupDatatype = | _ => case t of TFfi ("Basis", "unit") => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) - | TFfi (m, x) => ((EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm) + | TFfi (m, x) => (if Settings.mayClientToServer (m, x) + (* TODO: better error message. (Then again, user should never see this.) 
*) + then () + else (E.errorAt loc "MonoFooify: can't pass type from client to server"; + Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty tAll)]); + ((EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm)) | TRecord [] => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) | TRecord ((x, t) :: xts) => @@ -296,22 +298,38 @@ fun fooifyExp fk lookupENamed lookupDatatype = fooify end +(* Has to be set at the end of [Monoize]. *) +val canonicalFm = ref (Fm.empty 0 : Fm.t) + fun urlify env expTyp = + if ErrorMsg.anyErrors () + then ((* DEBUG *) print "already error"; NONE) + else + let + val (exp, fm) = + fooifyExp + Url + (fn n => + let + val (_, t, _, s) = MonoEnv.lookupENamed env n + in + (t, s) + end) + (fn n => MonoEnv.lookupDatatype env n) + (!canonicalFm) + expTyp + in + if ErrorMsg.anyErrors () + then ((* DEBUG *) print "why"; (ErrorMsg.resetErrors (); NONE)) + else (canonicalFm := fm; SOME exp) + end + +fun getNewFmDecls () = let - val (exp, fm) = - fooifyExp - Url - (fn n => - let - val (_, t, _, s) = MonoEnv.lookupENamed env n - in - (t, s) - end) - (fn n => MonoEnv.lookupDatatype env n) - (!Fm.canonical) - expTyp + val fm = !canonicalFm in - Fm.canonical := fm; - exp + (* canonicalFm := Fm.enter fm; *) + Fm.decls fm end + end diff --git a/src/monoize.sml b/src/monoize.sml index 8f6b298d..4208f594 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -4484,13 +4484,14 @@ fun monoize env file = (L'.DDatatype (dts @ !pvarDefs), loc) :: Fm.decls fm @ ds | _ => ds' @ Fm.decls fm @ (L'.DDatatype (!pvarDefs), loc) :: ds))) - (env, Fm.empty mname, []) file + (env, Fm.empty mname, []) file + val monoFile = (rev ds, []) in pvars := RM.empty; pvarDefs := []; pvarOldDefs := []; - Fm.canonical := fm; - (rev ds, []) + MonoFooify.canonicalFm := Fm.empty (MonoUtil.File.maxName monoFile); + monoFile end end diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 6b4216ea..eaa94685 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -493,27 +493,34 @@ fun incRels inc = bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} 0 -fun cacheWrap (env, query, i, resultTyp, args) = +fun cacheWrap (env, exp, resultTyp, args, i) = let - val () = ffiInfo := {index = i, params = length args} :: !ffiInfo val loc = dummyLoc val rel0 = (ERel 0, loc) - (* We ensure before this step that all arguments aren't effectful. - by turning them into local variables as needed. *) - val argsInc = map (incRels 1) args - val check = (check (i, args), dummyLoc) - val store = (store (i, argsInc, MonoFooify.urlify env (rel0, resultTyp)), dummyLoc) in - ECase (check, - [((PNone stringTyp, loc), - (ELet (varName "q", resultTyp, query, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), - (* Boolean is false because we're not unurlifying from a cookie. *) - (EUnurlify (rel0, resultTyp, false), loc))], - {disc = stringTyp, result = resultTyp}) + case MonoFooify.urlify env (rel0, resultTyp) of + NONE => NONE + | SOME urlified => + let + val () = ffiInfo := {index = i, params = length args} :: !ffiInfo + (* We ensure before this step that all arguments aren't effectful. + by turning them into local variables as needed. 
*) + val argsInc = map (incRels 1) args + val check = (check (i, args), loc) + val store = (store (i, argsInc, urlified), loc) + in + SOME (ECase + (check, + [((PNone stringTyp, loc), + (ELet (varName "q", resultTyp, exp, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), + (* Boolean is false because we're not unurlifying from a cookie. *) + (EUnurlify (rel0, resultTyp, false), loc))], + {disc = (TOption stringTyp, loc), result = resultTyp})) + end end -fun fileMapfold doExp file start = +fun fileMapfoldB doExp file start = case MonoUtil.File.mapfoldB {typ = Search.return2, exp = fn env => fn e' => fn s => Search.Continue (doExp env e' s), @@ -523,7 +530,7 @@ fun fileMapfold doExp file start = Search.Continue x => x | Search.Return _ => raise Match -fun fileMap doExp file = #1 (fileMapfold (fn _ => fn e => fn _ => (doExp e, ())) file ()) +fun fileMap doExp file = #1 (fileMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) fun factorOutNontrivial text = let @@ -561,6 +568,7 @@ fun factorOutNontrivial text = fun addChecking file = let + val effs = effectfulDecls file fun doExp env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = fn e' as EQuery {query = origQueryText, state = resultTyp, @@ -582,7 +590,6 @@ fun addChecking file = val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE - val effs = effectfulDecls file (* We use dummyTyp here. I think this is okay because databases don't store (effectful) functions, but perhaps there's some pathalogical corner case missing.... *) @@ -596,12 +603,13 @@ fun addChecking file = (* Ziv misses Haskell's do notation.... *) guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( bind (Sql.parse Sql.query queryText) (fn queryParsed => - SOME (wrapLets (cacheWrap (env, queryExp, index, resultTyp, args)), + bind (cacheWrap (env, queryExp, resultTyp, args, index)) (fn cachedExp => + SOME (wrapLets cachedExp, (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) tableToIndices (tablesQuery queryParsed), IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - index + 1)))) + index + 1))))) in case attempt of SOME pair => pair @@ -609,9 +617,10 @@ fun addChecking file = end | e' => (e', queryInfo) in - fileMapfold (fn env => fn exp => fn state => doExp env state exp) - file - (SIMM.empty, IM.empty, 0) + (fileMapfoldB (fn env => fn exp => fn state => doExp env state exp) + file + (SIMM.empty, IM.empty, 0), + effs) end structure Invalidations = struct @@ -662,7 +671,7 @@ val invalidations = Invalidations.invalidations (* DEBUG *) val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] -fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = +fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = let val flushes = List.concat o map (fn (i, argss) => map (fn args => flush (i, args)) argss) @@ -694,7 +703,7 @@ fun addFlushing (file, (tableToIndices, indexToQueryNumArgs, _)) = in (* DEBUG *) gunk := []; - fileMap doExp file + (fileMap doExp file, index, effs) end val inlineSql = @@ -713,25 +722,11 @@ val inlineSql = fileMap doExp end -fun go file = - let - (* TODO: do something nicer than [Sql] being in one of two modes. 
*) - val () = (resetFfiInfo (); Sql.sqlcacheMode := true) - val file' = addFlushing (addChecking (inlineSql file)) - val () = Sql.sqlcacheMode := false - in - file' - end - (**********************) (* Mono Type Checking *) (**********************) -val typOfPrim = - fn Prim.Int _ => TFfi ("Basis", "int") - | Prim.Float _ => TFfi ("Basis", "int") - fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = fn EPrim p => SOME (TFfi ("Basis", case p of Prim.Int _ => "int" @@ -779,6 +774,7 @@ fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = | ELet (s, t, e1, e2) => typOfExp (MonoEnv.pushERel env s t (SOME e1)) e2 | EClosure _ => NONE | EUnurlify (_, t, _) => SOME t + | _ => NONE and typOfExp env (e', loc) = typOfExp' env e' @@ -797,17 +793,35 @@ val expOfSubexp = fn Pure f => f () | Impure e => e -val makeCache : MonoEnv.env -> exp' -> exp' = fn _ => fn _ => raise Fail "TODO" - -fun pureCache (effs : IS.set) (env : MonoEnv.env) (exp as (exp', loc)) : subexp = +fun makeCache (env, exp', index) = + case typOfExp' env exp' of + NONE => NONE + | SOME (TFun _, _) => NONE + | SOME typ => + case ListUtil.foldri (fn (_, _, NONE) => NONE + | (n, typ, SOME args) => + case MonoFooify.urlify env ((ERel n, dummyLoc), typ) of + NONE => NONE + | SOME arg => SOME (arg :: args)) + (SOME []) + (MonoEnv.typeContext env) of + NONE => NONE + | SOME args => cacheWrap (env, (exp', dummyLoc), typ, args, index) + +fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int = let fun wrapBindN f (args : (MonoEnv.env * exp) list) = let - val subexps = map (fn (env, exp) => pureCache effs env exp) args + val (subexps, index) = ListUtil.foldlMap (pureCache effs) index args + fun mkExp () = (f (map expOfSubexp subexps), loc) in if List.exists isImpure subexps - then Impure (f (map expOfSubexp subexps), loc) - else Pure (fn () => (makeCache env (f (map #2 args)), loc)) + then (Impure (mkExp ()), index) + else (Pure (fn () => case makeCache (env, f (map #2 args), index) of + NONE => mkExp () + | SOME e' => (e', loc)), + (* Conservatively increment index. *) + index + 1) end fun wrapBind1 f arg = wrapBindN (fn [arg] => f arg | _ => raise Match) [arg] @@ -837,7 +851,8 @@ fun pureCache (effs : IS.set) (env : MonoEnv.env) (exp as (exp', loc)) : subexp wrapBindN (fn (e::es) => ECase (e, (ListPair.map (fn (e, (p, _)) => (p, e)) (es, cases)), - {disc = disc, result = result})) + {disc = disc, result = result}) + | _ => raise Match) ((env, e) :: map (fn (p, e) => (MonoEnv.patBinds env p, e)) cases) | EStrcat (e1, e2) => wrap2 EStrcat (e1, e2) (* We record page writes, so they're cachable. *) @@ -849,8 +864,61 @@ fun pureCache (effs : IS.set) (env : MonoEnv.env) (exp as (exp', loc)) : subexp (* ASK: | EClosure (n, es) => ? 
*) | EUnurlify (e, t, b) => wrap1 (fn e => EUnurlify (e, t, b)) e | _ => if effectful effs env exp - then Impure exp - else Pure (fn () => (makeCache env exp', loc)) + then (Impure exp, index) + else (Pure (fn () => (case makeCache (env, exp', index) of + NONE => exp' + | SOME e' => e', + loc)), + index + 1) + end + +fun addPure ((decls, sideInfo), index, effs) = + let + fun doVal ((x, n, t, exp, s), index) = + let + val (subexp, index) = pureCache effs ((MonoEnv.empty, exp), index) + in + ((x, n, t, expOfSubexp subexp, s), index) + end + fun doDecl' (decl', index) = + case decl' of + DVal v => + let + val (v, index) = (doVal (v, index)) + in + (DVal v, index) + end + | DValRec vs => + let + val (vs, index) = ListUtil.foldlMap doVal index vs + in + (DValRec vs, index) + end + | _ => (decl', index) + fun doDecl ((decl', loc), index) = + let + val (decl', index) = doDecl' (decl', index) + in + ((decl', loc), index) + end + val decls = #1 (ListUtil.foldlMap doDecl index decls) + (* Important that this happens after the MonoFooify.urlify calls! *) + val fmDecls = MonoFooify.getNewFmDecls () + in + print (Int.toString (length fmDecls)); + (decls @ fmDecls, sideInfo) + end + +val go' = addPure o addFlushing o addChecking o inlineSql + +fun go file = + let + (* TODO: do something nicer than [Sql] being in one of two modes. *) + val () = (resetFfiInfo (); Sql.sqlcacheMode := true) + val file' = go' file + val () = Sql.sqlcacheMode := false + in + file' end end -- cgit v1.2.3 From 067c8cd3b908eb057f6721453a5c3801965d43b8 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 27 Sep 2015 14:46:12 -0400 Subject: Use referenced (rather than all) free variables as keys for pure caches. --- src/mono_env.sig | 2 -- src/mono_env.sml | 2 -- src/sqlcache.sml | 34 ++++++++++++++++++++++++---------- 3 files changed, 24 insertions(+), 14 deletions(-) (limited to 'src') diff --git a/src/mono_env.sig b/src/mono_env.sig index 9805c0d1..db6fdc95 100644 --- a/src/mono_env.sig +++ b/src/mono_env.sig @@ -42,8 +42,6 @@ signature MONO_ENV = sig val pushERel : env -> string -> Mono.typ -> Mono.exp option -> env val lookupERel : env -> int -> string * Mono.typ * Mono.exp option - val typeContext : env -> Mono.typ list - val pushENamed : env -> string -> int -> Mono.typ -> Mono.exp option -> string -> env val lookupENamed : env -> int -> string * Mono.typ * Mono.exp option * string diff --git a/src/mono_env.sml b/src/mono_env.sml index 8617425e..52e07893 100644 --- a/src/mono_env.sml +++ b/src/mono_env.sml @@ -108,8 +108,6 @@ fun lookupERel (env : env) n = (List.nth (#relE env, n)) handle Subscript => raise UnboundRel n -fun typeContext (env : env) = map #2 (#relE env) - fun pushENamed (env : env) x n t eo s = {datatypes = #datatypes env, constructors = #constructors env, diff --git a/src/sqlcache.sml b/src/sqlcache.sml index eaa94685..fa4a0d22 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -673,8 +673,8 @@ val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = let - val flushes = List.concat o - map (fn (i, argss) => map (fn args => flush (i, args)) argss) + val flushes = List.concat + o map (fn (i, argss) => map (fn args => flush (i, args)) argss) val doExp = fn EDml (origDmlText, failureMode) => let @@ -783,6 +783,18 @@ and typOfExp env (e', loc) = typOfExp' env e' (* Caching Pure Subexpressions *) (*******************************) +val freeVars = + IS.listItems + o MonoUtil.Exp.foldB + {typ = #2, + exp = fn (bound, ERel n, vars) 
=> if n < bound + then vars + else IS.add (vars, n - bound) + | (_, _, vars) => vars, + bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} + 0 + IS.empty + datatype subexp = Pure of unit -> exp | Impure of exp val isImpure = @@ -798,13 +810,14 @@ fun makeCache (env, exp', index) = NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - case ListUtil.foldri (fn (_, _, NONE) => NONE - | (n, typ, SOME args) => - case MonoFooify.urlify env ((ERel n, dummyLoc), typ) of - NONE => NONE - | SOME arg => SOME (arg :: args)) - (SOME []) - (MonoEnv.typeContext env) of + case List.foldr (fn ((_, _), NONE) => NONE + | ((n, typ), SOME args) => + case MonoFooify.urlify env ((ERel n, dummyLoc), typ) of + NONE => NONE + | SOME arg => SOME (arg :: args)) + (SOME []) + (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) + (freeVars (exp', dummyLoc))) of NONE => NONE | SOME args => cacheWrap (env, (exp', dummyLoc), typ, args, index) @@ -906,7 +919,8 @@ fun addPure ((decls, sideInfo), index, effs) = val fmDecls = MonoFooify.getNewFmDecls () in print (Int.toString (length fmDecls)); - (decls @ fmDecls, sideInfo) + (* ASK: fmDecls before or after? *) + (fmDecls @ decls, sideInfo) end val go' = addPure o addFlushing o addChecking o inlineSql -- cgit v1.2.3 From 150e1a3cdc0cfae2f583f7d0185b90d5ee82a018 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 27 Sep 2015 17:02:14 -0400 Subject: Fix bug where pure caching didn't treat FFI applications as effectful. --- src/lru_cache.sml | 8 ++++++- src/sqlcache.sml | 68 ++++++++++++++++++++++++++++++------------------------- 2 files changed, 44 insertions(+), 32 deletions(-) (limited to 'src') diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 0030777f..b8dfde5e 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -13,7 +13,13 @@ val optionStringTyp = (TOption stringTyp, dummyLoc) fun withTyp typ = map (fn exp => (exp, typ)) fun ffiAppCache' (func, index, argTyps) = - EFfiApp ("Sqlcache", func ^ Int.toString index, argTyps) + let + val m = "Sqlcache" + val f = func ^ Int.toString index + in + Settings.addEffectful (m, f); + EFfiApp (m, f, argTyps) + end fun check (index, keys) = ffiAppCache' ("check", index, withTyp stringTyp keys) diff --git a/src/sqlcache.sml b/src/sqlcache.sml index fa4a0d22..e2cc01d7 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -26,23 +26,23 @@ fun getFfiInfo () = !ffiInfo val ffiEffectful = (* ASK: how can this be less hard-coded? *) let - val fs = SS.fromList ["htmlifyInt_w", - "htmlifyFloat_w", - "htmlifyString_w", - "htmlifyBool_w", - "htmlifyTime_w", - "attrifyInt_w", - "attrifyFloat_w", - "attrifyString_w", - "attrifyChar_w", - "urlifyInt_w", - "urlifyFloat_w", - "urlifyString_w", - "urlifyBool_w", - "urlifyChannel_w"] + val okayWrites = SS.fromList ["htmlifyInt_w", + "htmlifyFloat_w", + "htmlifyString_w", + "htmlifyBool_w", + "htmlifyTime_w", + "attrifyInt_w", + "attrifyFloat_w", + "attrifyString_w", + "attrifyChar_w", + "urlifyInt_w", + "urlifyFloat_w", + "urlifyString_w", + "urlifyBool_w", + "urlifyChannel_w"] in fn (m, f) => Settings.isEffectful (m, f) - orelse not (m = "Basis" andalso SS.member (fs, f)) + andalso not (m = "Basis" andalso SS.member (okayWrites, f)) end val cache = ref LruCache.cache @@ -548,7 +548,7 @@ fun factorOutNontrivial text = let val n = length newVars in - (* This is the (n + 1)th new variable, so there are + (* This is the (n+1)th new variable, so there are already n new variables bound, so we increment indices by n. 
*) (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) @@ -586,7 +586,7 @@ fun addChecking file = dummyLoc) val (EQuery {query = queryText, ...}, _) = queryExp (* DEBUG *) - val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty queryText)) + (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) fun bind x f = Option.mapPartial f x fun guard b x = if b then x else NONE @@ -682,7 +682,7 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = val dmlText = incRels numArgs newDmlText val dmlExp = EDml (dmlText, failureMode) (* DEBUG *) - val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty dmlText)) + (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty dmlText)) *) val invs = case Sql.parse Sql.dml dmlText of SOME dmlParsed => @@ -795,6 +795,8 @@ val freeVars = 0 IS.empty +val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 + datatype subexp = Pure of unit -> exp | Impure of exp val isImpure = @@ -810,16 +812,18 @@ fun makeCache (env, exp', index) = NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - case List.foldr (fn ((_, _), NONE) => NONE - | ((n, typ), SOME args) => - case MonoFooify.urlify env ((ERel n, dummyLoc), typ) of - NONE => NONE - | SOME arg => SOME (arg :: args)) - (SOME []) - (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) - (freeVars (exp', dummyLoc))) of - NONE => NONE - | SOME args => cacheWrap (env, (exp', dummyLoc), typ, args, index) + if expSize (exp', dummyLoc) < 5 (* TODO: pick a number. *) + then NONE + else case List.foldr (fn ((_, _), NONE) => NONE + | ((n, typ), SOME args) => + case MonoFooify.urlify env ((ERel n, dummyLoc), typ) of + NONE => NONE + | SOME arg => SOME (arg :: args)) + (SOME []) + (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) + (freeVars (exp', dummyLoc))) of + NONE => NONE + | SOME args => cacheWrap (env, (exp', dummyLoc), typ, args, index) fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int = let @@ -848,8 +852,11 @@ fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int ECon (dk, pc, SOME e) => wrap1 (fn e => ECon (dk, pc, SOME e)) e | ESome (t, e) => wrap1 (fn e => ESome (t, e)) e | EFfiApp (s1, s2, args) => - wrapN (fn es => EFfiApp (s1, s2, ListPair.map (fn (e, (_, t)) => (e, t)) (es, args))) - (map #1 args) + if ffiEffectful (s1, s2) + then (Impure exp, index) + else wrapN (fn es => + EFfiApp (s1, s2, ListPair.map (fn (e, (_, t)) => (e, t)) (es, args))) + (map #1 args) | EApp (e1, e2) => wrap2 EApp (e1, e2) | EAbs (s, t1, t2, e) => wrapBind1 (fn e => EAbs (s, t1, t2, e)) @@ -918,7 +925,6 @@ fun addPure ((decls, sideInfo), index, effs) = (* Important that this happens after the MonoFooify.urlify calls! *) val fmDecls = MonoFooify.getNewFmDecls () in - print (Int.toString (length fmDecls)); (* ASK: fmDecls before or after? *) (fmDecls @ decls, sideInfo) end -- cgit v1.2.3 From 5d00499cabd7c0ddf5eb9e78c883615cb918197e Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 27 Sep 2015 17:24:57 -0400 Subject: Fix effectfulness registration toy cache. 
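
The previous commit made the pure-caching pass treat effectful FFI applications as impure and registered the LRU cache's FFI functions as effectful; this one applies the same registration to the toy cache, so the generated Sqlcache check/store/flush calls are never mistaken for pure computations. The wrapper in toy_cache.sml now mirrors the one in lru_cache.sml:

    fun ffiAppCache' (func, index, argTyps) =
        let
            val m = "Sqlcache"
            val f = func ^ Int.toString index
        in
            Settings.addEffectful (m, f);
            EFfiApp (m, f, argTyps)
        end
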
--- src/sqlcache.sml | 7 ++++++- src/toy_cache.sml | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index e2cc01d7..1518e994 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -41,6 +41,8 @@ val ffiEffectful = "urlifyBool_w", "urlifyChannel_w"] in + (* ASK: nicer way than using [Settings.addEffectful] for each Sqlcache + function? Right now they're all always effectful. *) fn (m, f) => Settings.isEffectful (m, f) andalso not (m = "Basis" andalso SS.member (okayWrites, f)) end @@ -807,12 +809,15 @@ val expOfSubexp = fn Pure f => f () | Impure e => e +(* TODO: pick a number. *) +val sizeWorthCaching = 5 + fun makeCache (env, exp', index) = case typOfExp' env exp' of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - if expSize (exp', dummyLoc) < 5 (* TODO: pick a number. *) + if expSize (exp', dummyLoc) < sizeWorthCaching then NONE else case List.foldr (fn ((_, _), NONE) => NONE | ((n, typ), SOME args) => diff --git a/src/toy_cache.sml b/src/toy_cache.sml index 126768b6..34a7a26f 100644 --- a/src/toy_cache.sml +++ b/src/toy_cache.sml @@ -13,7 +13,13 @@ val optionStringTyp = (TOption stringTyp, dummyLoc) fun withTyp typ = map (fn exp => (exp, typ)) fun ffiAppCache' (func, index, argTyps) = - EFfiApp ("Sqlcache", func ^ Int.toString index, argTyps) + let + val m = "Sqlcache" + val f = func ^ Int.toString index + in + Settings.addEffectful (m, f); + EFfiApp (m, f, argTyps) + end fun check (index, keys) = ffiAppCache' ("check", index, withTyp stringTyp keys) -- cgit v1.2.3 From 3c2143723af4a52064386104d2105137a77bd761 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 28 Sep 2015 22:16:51 -0400 Subject: Begin work on cache merging. --- src/mono_fooify.sml | 2 +- src/sqlcache.sml | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/mono_fooify.sml b/src/mono_fooify.sml index 2e32b248..9bf357fb 100644 --- a/src/mono_fooify.sml +++ b/src/mono_fooify.sml @@ -1,4 +1,4 @@ -structure MonoFooify (* :> MONO_FOOIFY *) = struct +structure MonoFooify :> MONO_FOOIFY = struct open Mono diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 1518e994..09feeb36 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -799,6 +799,45 @@ val freeVars = val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 +structure InvalidationInfo :> sig + type t + val fromList : int list -> t + val toList : t -> int list + val union : t * t -> t + val unbind : t * int -> t option +end = struct + +(* Keep track of the minimum explicitly. NONE is the empty set. *) +type t = (int * IS.set) option + +val fromList = + List.foldl + (fn (n, NONE) => SOME (n, IS.singleton n) + | (n', SOME (n, ns)) => SOME (Int.min (n, n'), IS.add (ns, n'))) + NONE + +val toList = + fn NONE => [] + | SOME (_, ns) => IS.listItems ns + +val union = + fn (SOME (n1, ns1), SOME (n2, ns2)) => SOME (Int.min (n1, n2), IS.union (ns1, ns2)) + | (NONE, x) => x + | (x, NONE) => x + +val unbind = + fn (SOME (n, ns), unbound) => + let + val n = n - unbound + in + if n < 0 + then NONE + else SOME (SOME (n, IS.map (fn n => n - unbound) ns)) + end + | _ => SOME NONE + +end + datatype subexp = Pure of unit -> exp | Impure of exp val isImpure = -- cgit v1.2.3 From 36cb6a55281f753774e491cce3178eb8c927983e Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 30 Sep 2015 00:33:52 -0400 Subject: Fix SQL-parsing and declaration-ordering bugs. 
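
Two parser gaps and one ordering problem. The SQL expression grammar did not accept the ">>" and "<<" shift operators that show up in the client/channel comparisons Monoize emits (Monoize now also parenthesizes that comparison so it parses unambiguously), and DELETE statements without a WHERE clause failed to parse; they now default to a TRUE condition:

    val delete = log "delete"
                 (wrap (follow (const "DELETE FROM ")
                               (follow uw_ident
                                       (follow (opt (const " AS T_T"))
                                               (opt (follow (const " WHERE ") sqexp)))))
                       (fn ((), (tab, (_, wher))) => (tab, case wher of
                                                               SOME (_, es) => es
                                                             | NONE => SqTrue)))

On the declaration-ordering side, the Fm declarations produced while urlifying are now spliced in next to the declaration that triggered them (and Fm.decls is documented as returning its list in reverse order), rather than all being prepended to the file, and Sqlcache now bumps its cache index conservatively even when it declines to cache a query.
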
--- src/mono_fooify.sig | 2 ++ src/mono_fooify.sml | 2 +- src/monoize.sml | 16 +++++----- src/sql.sml | 10 ++++--- src/sqlcache.sml | 84 +++++++++++++++++++++++++++++++++-------------------- 5 files changed, 70 insertions(+), 44 deletions(-) (limited to 'src') diff --git a/src/mono_fooify.sig b/src/mono_fooify.sig index ef8f09c2..0cc72342 100644 --- a/src/mono_fooify.sig +++ b/src/mono_fooify.sig @@ -16,6 +16,7 @@ structure Fm : sig val lookup : t -> foo_kind -> int -> (int -> t -> vr * t) -> t * int val lookupList : t -> foo_kind -> Mono.typ -> (int -> t -> vr * t) -> t * int val enter : t -> t + (* This list should be reversed before adding to list of file declarations. *) val decls : t -> Mono.decl list val freshName : t -> int * t @@ -32,6 +33,7 @@ val fooifyExp : foo_kind (* Easy-to-use interface in [Sqlcache]. Uses [Fm.canonical]. *) val canonicalFm : Fm.t ref (* Set at the end of [Monoize]. *) val urlify : MonoEnv.env -> Mono.exp * Mono.typ -> Mono.exp option +(* This list should be reversed before adding to list of file declarations. *) val getNewFmDecls : unit -> Mono.decl list end diff --git a/src/mono_fooify.sml b/src/mono_fooify.sml index 9bf357fb..b7d0b6c6 100644 --- a/src/mono_fooify.sml +++ b/src/mono_fooify.sml @@ -328,7 +328,7 @@ fun getNewFmDecls () = let val fm = !canonicalFm in - (* canonicalFm := Fm.enter fm; *) + canonicalFm := Fm.enter fm; Fm.decls fm end diff --git a/src/monoize.sml b/src/monoize.sml index 4208f594..2e87a70b 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -4344,12 +4344,14 @@ fun monoize env file = val (nullable, notNullable) = calcClientish xts fun cond (x, v) = - (L'.EStrcat (str (Settings.mangleSql x - ^ (case v of - Client => "" - | Channel => " >> 32") - ^ " = "), - target), loc) + (L'.EStrcat ((L'.EStrcat (str ("((" + ^ Settings.mangleSql x + ^ (case v of + Client => "" + | Channel => " >> 32") + ^ ") = "), + target), loc), + str ")"), loc) val e = foldl (fn ((x, v), e) => @@ -4490,7 +4492,7 @@ fun monoize env file = pvars := RM.empty; pvarDefs := []; pvarOldDefs := []; - MonoFooify.canonicalFm := Fm.empty (MonoUtil.File.maxName monoFile); + MonoFooify.canonicalFm := Fm.empty (MonoUtil.File.maxName monoFile + 1); monoFile end diff --git a/src/sql.sml b/src/sql.sml index da0143b7..08315a16 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -321,7 +321,7 @@ val funcName = altL [constK "COUNT", fun arithmetic pExp = follow (const "(") (follow pExp - (follow (altL (map const [" + ", " - ", " * ", " / "])) + (follow (altL (map const [" + ", " - ", " * ", " / ", " >> ", " << "])) (follow pExp (const ")")))) val unmodeled = altL [const "COUNT(*)", @@ -445,9 +445,11 @@ val insert = log "insert" val delete = log "delete" (wrap (follow (const "DELETE FROM ") (follow uw_ident - (follow (follow (opt (const " AS T_T")) (const " WHERE ")) - sqexp))) - (fn ((), (tab, (_, es))) => (tab, es))) + (follow (opt (const " AS T_T")) + (opt (follow (const " WHERE ") sqexp))))) + (fn ((), (tab, (_, wher))) => (tab, case wher of + SOME (_, es) => es + | NONE => SqTrue))) val setting = log "setting" (wrap (follow uw_ident (follow (const " = ") sqexp)) diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 09feeb36..4d4c7d36 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -499,6 +499,8 @@ fun cacheWrap (env, exp, resultTyp, args, i) = let val loc = dummyLoc val rel0 = (ERel 0, loc) + (* DEBUG *) + val () = print (Int.toString i ^ "\n") in case MonoFooify.urlify env (rel0, resultTyp) of NONE => NONE @@ -506,7 +508,7 @@ fun cacheWrap (env, exp, resultTyp, args, i) 
= let val () = ffiInfo := {index = i, params = length args} :: !ffiInfo (* We ensure before this step that all arguments aren't effectful. - by turning them into local variables as needed. *) + by turning them into local variables as needed. *) val argsInc = map (incRels 1) args val check = (check (i, args), loc) val store = (store (i, argsInc, urlified), loc) @@ -615,7 +617,9 @@ fun addChecking file = in case attempt of SOME pair => pair - | NONE => (e', queryInfo) + (* We have to increment index conservatively. *) + (* TODO: just use a reference for current index.... *) + | NONE => (e', (tableToIndices, indexToQueryNumArgs, index + 1)) end | e' => (e', queryInfo) in @@ -672,6 +676,7 @@ val invalidations = Invalidations.invalidations (* DEBUG *) val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] +val gunk' : exp list ref = ref [] fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = let @@ -680,26 +685,30 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = val doExp = fn EDml (origDmlText, failureMode) => let + (* DEBUG *) + val () = gunk' := origDmlText :: !gunk' val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText val dmlText = incRels numArgs newDmlText val dmlExp = EDml (dmlText, failureMode) (* DEBUG *) - (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty dmlText)) *) - val invs = + val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) + val inval = case Sql.parse Sql.dml dmlText of SOME dmlParsed => - map (fn i => (case IM.find (indexToQueryNumArgs, i) of - SOME queryNumArgs => - (* DEBUG *) - (gunk := (queryNumArgs, dmlParsed) :: !gunk; - (i, invalidations (queryNumArgs, dmlParsed))) - (* TODO: fail more gracefully. *) - | NONE => raise Match)) - (SIMM.findList (tableToIndices, tableDml dmlParsed)) - (* TODO: fail more gracefully. *) - | NONE => raise Match + SOME (map (fn i => (case IM.find (indexToQueryNumArgs, i) of + SOME queryNumArgs => + (* DEBUG *) + (gunk := (queryNumArgs, dmlParsed) :: !gunk; + (i, invalidations (queryNumArgs, dmlParsed))) + (* TODO: fail more gracefully. *) + | NONE => raise Match)) + (SIMM.findList (tableToIndices, tableDml dmlParsed))) + | NONE => NONE in - wrapLets (sequence (flushes invs @ [dmlExp])) + case inval of + (* TODO: fail more gracefully. 
*) + NONE => raise Match + | SOME invs => wrapLets (sequence (flushes invs @ [dmlExp])) end | e' => e' in @@ -801,6 +810,7 @@ val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 structure InvalidationInfo :> sig type t + val empty : t val fromList : int list -> t val toList : t -> int list val union : t * t -> t @@ -816,14 +826,16 @@ val fromList = | (n', SOME (n, ns)) => SOME (Int.min (n, n'), IS.add (ns, n'))) NONE +val empty = fromList [] + val toList = fn NONE => [] | SOME (_, ns) => IS.listItems ns val union = fn (SOME (n1, ns1), SOME (n2, ns2)) => SOME (Int.min (n1, n2), IS.union (ns1, ns2)) - | (NONE, x) => x - | (x, NONE) => x + | (NONE, info) => info + | (info, NONE) => info val unbind = fn (SOME (n, ns), unbound) => @@ -838,6 +850,15 @@ val unbind = end +val unionUnbind = + List.foldl + (fn (_, NONE) => NONE + | ((info, unbound), SOME infoAcc) => + case InvalidationInfo.unbind (info, unbound) of + NONE => NONE + | SOME info => SOME (InvalidationInfo.union (info, infoAcc))) + (SOME InvalidationInfo.empty) + datatype subexp = Pure of unit -> exp | Impure of exp val isImpure = @@ -936,44 +957,43 @@ fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int index + 1) end -fun addPure ((decls, sideInfo), index, effs) = +fun addPure ((decls, sideInfo), indexStart, effs) = let - fun doVal ((x, n, t, exp, s), index) = + fun doVal env ((x, n, t, exp, s), index) = let - val (subexp, index) = pureCache effs ((MonoEnv.empty, exp), index) + val (subexp, index) = pureCache effs ((env, exp), index) in ((x, n, t, expOfSubexp subexp, s), index) end - fun doDecl' (decl', index) = + fun doDecl' env (decl', index) = case decl' of DVal v => let - val (v, index) = (doVal (v, index)) + val (v, index) = doVal env (v, index) in (DVal v, index) end | DValRec vs => let - val (vs, index) = ListUtil.foldlMap doVal index vs + val (vs, index) = ListUtil.foldlMap (doVal env) index vs in (DValRec vs, index) end | _ => (decl', index) - fun doDecl ((decl', loc), index) = + fun doDecl (decl as (decl', loc), (revDecls, env, index)) = let - val (decl', index) = doDecl' (decl', index) + val env = MonoEnv.declBinds env decl + val (decl', index) = doDecl' env (decl', index) + (* Important that this happens after [MonoFooify.urlify] calls! *) + val fmDecls = MonoFooify.getNewFmDecls () in - ((decl', loc), index) + ((decl', loc) :: (fmDecls @ revDecls), env, index) end - val decls = #1 (ListUtil.foldlMap doDecl index decls) - (* Important that this happens after the MonoFooify.urlify calls! *) - val fmDecls = MonoFooify.getNewFmDecls () in - (* ASK: fmDecls before or after? *) - (fmDecls @ decls, sideInfo) + (rev (#1 (List.foldl doDecl ([], MonoEnv.empty, indexStart) decls)), sideInfo) end -val go' = addPure o addFlushing o addChecking o inlineSql +val go' = addPure o addFlushing o addChecking (* DEBUG: add back [o inlineSql]. *) fun go file = let -- cgit v1.2.3 From 013ea39e9f187efbb0e3a613264a1c7adfebe692 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 7 Oct 2015 08:58:08 -0400 Subject: Fix recording bugs to do with nesting and buffer reallocation. Stop MonoFooify printing spurious errors. 
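
The recording fix below stores where recording began as an offset into the page buffer plus a nesting counter, instead of a raw pointer. The offset matters because the page buffer can be reallocated (and therefore moved) as more output is written, which leaves any saved pointer into it dangling. As a minimal, standalone sketch of that pointer-versus-offset point only — not the Ur/Web runtime API; the buffer type and helper names here are invented for the illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A tiny growable buffer standing in for the page buffer. */
    typedef struct { char *start; size_t len, cap; } buf;

    static void buf_init(buf *b) {
      b->cap = 8; b->len = 0;
      b->start = malloc(b->cap);
      b->start[0] = 0;
    }

    static void buf_write(buf *b, const char *s) {
      size_t n = strlen(s);
      while (b->len + n + 1 > b->cap) {
        b->cap *= 2;
        /* realloc may move the block, invalidating saved pointers into it. */
        b->start = realloc(b->start, b->cap);
      }
      memcpy(b->start + b->len, s, n + 1);
      b->len += n;
    }

    int main(void) {
      buf page;
      buf_init(&page);

      /* Two ways to remember where a "recording" starts. */
      char *rec_ptr = page.start + page.len;  /* fragile: may dangle after realloc */
      size_t rec_off = page.len;              /* robust: recompute pointer on read */

      int i;
      for (i = 0; i < 100; i++)               /* enough writes to force reallocation */
        buf_write(&page, "0123456789");

      /* Reading through the saved offset is safe even after the buffer moved. */
      printf("offset-based read begins: %.10s\n", page.start + rec_off);
      (void) rec_ptr;                         /* must not be dereferenced */

      free(page.start);
      return 0;
    }

The nesting counter in the actual patch is a separate concern that this sketch does not model: nested calls to uw_recordingStart only bump the counter, and uw_recordingRead returns NULL to inner recorders (unless the recording is empty), so only the outermost recorder consumes the recorded output.
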
--- src/c/urweb.c | 26 +++++++++---- src/lru_cache.sml | 3 +- src/mono_fooify.sml | 75 +++++++++++++++++++++--------------- src/sqlcache.sml | 107 +++++++++++++++++++++++++++++++--------------------- src/toy_cache.sml | 16 ++++++-- 5 files changed, 141 insertions(+), 86 deletions(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 61742693..957f158c 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -72,6 +72,9 @@ void uw_buffer_free(uw_buffer *b) { void uw_buffer_reset(uw_buffer *b) { b->front = b->start; + if (b->front != b->back) { + *b->front = 0; + } } int uw_buffer_check(uw_buffer *b, size_t extra) { @@ -486,7 +489,8 @@ struct uw_context { size_t output_buffer_size; // For caching. - char *recording; + int numRecording; + int recordingOffset; int remoteSock; }; @@ -572,7 +576,8 @@ uw_context uw_init(int id, uw_loggers *lg) { ctx->output_buffer = malloc(1); ctx->output_buffer_size = 1; - ctx->recording = 0; + ctx->numRecording = 0; + ctx->recordingOffset = 0; ctx->remoteSock = -1; @@ -1689,11 +1694,18 @@ void uw_write(uw_context ctx, const char* s) { } void uw_recordingStart(uw_context ctx) { - ctx->recording = ctx->page.front; + if (ctx->numRecording++ == 0) { + ctx->recordingOffset = ctx->page.front - ctx->page.start; + } } char *uw_recordingRead(uw_context ctx) { - return strdup(ctx->recording); + // Only the outermost recorder can read unless the recording is empty. + char *recording = ctx->page.start + ctx->recordingOffset; + if (--ctx->numRecording > 0 && recording != ctx->page.front) { + return NULL; + } + return strdup(recording); } char *uw_Basis_attrifyInt(uw_context ctx, uw_Basis_int n) { @@ -4543,7 +4555,7 @@ time_t uw_Sqlcache_timeMax(time_t x, time_t y) { return difftime(x, y) > 0 ? x : y; } -void uw_Sqlcache_freeuw_Sqlcache_CacheValue(uw_Sqlcache_CacheValue *value) { +void uw_Sqlcache_free(uw_Sqlcache_CacheValue *value) { if (value) { free(value->result); free(value->output); @@ -4554,7 +4566,7 @@ void uw_Sqlcache_freeuw_Sqlcache_CacheValue(uw_Sqlcache_CacheValue *value) { void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_CacheEntry* entry) { //uw_Sqlcache_listUw_Sqlcache_Delete(cache->lru, entry); HASH_DELETE(hh, cache->table, entry); - uw_Sqlcache_freeuw_Sqlcache_CacheValue(entry->value); + uw_Sqlcache_free(entry->value); free(entry->key); free(entry); } @@ -4595,7 +4607,7 @@ void uw_Sqlcache_storeHelper(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_ entry->timeValid = timeNow; if (cache->height == 0) { //uw_Sqlcache_listAdd(cache->lru, entry); - uw_Sqlcache_freeuw_Sqlcache_CacheValue(entry->value); + uw_Sqlcache_free(entry->value); entry->value = value; //if (cache->lru->size > MAX_SIZE) { //uw_Sqlcache_delete(cache, cache->lru->first); diff --git a/src/lru_cache.sml b/src/lru_cache.sml index b8dfde5e..275c3061 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -91,7 +91,8 @@ fun setupQuery {index, params} = newline, string (" uw_Sqlcache_CacheValue *v = uw_Sqlcache_check(cache" ^ i ^ ", ks);"), newline, - string " if (v) {", + (* If the output is null, it means we had too much recursion, so it's a miss. 
*) + string " if (v && v->output != NULL) {", newline, string (" puts(\"SQLCACHE: hit " ^ i ^ ".\");"), newline, diff --git a/src/mono_fooify.sml b/src/mono_fooify.sml index b7d0b6c6..bbd34b15 100644 --- a/src/mono_fooify.sml +++ b/src/mono_fooify.sml @@ -127,9 +127,13 @@ fun capitalize s = structure E = ErrorMsg +exception TypeMismatch of Fm.t * E.span +exception CantPass of Fm.t * typ +exception DontKnow of Fm.t * typ + val dummyExp = (EPrim (Prim.Int 0), E.dummySpan) -fun fooifyExp fk lookupENamed lookupDatatype = +fun fooifyExpWithExceptions fk lookupENamed lookupDatatype = let fun fooify fm (e, tAll as (t, loc)) = case #1 e of @@ -155,8 +159,7 @@ fun fooifyExp fk lookupENamed lookupDatatype = arg'), loc)), loc), fm) end - | _ => (E.errorAt loc "Type mismatch encoding attribute"; - (e, fm)) + | _ => raise TypeMismatch (fm, loc) in attrify (args, ft, (EPrim (Prim.String (Prim.Normal, Settings.getUrlPrefix () ^ s)), loc), fm) end @@ -165,10 +168,8 @@ fun fooifyExp fk lookupENamed lookupDatatype = TFfi ("Basis", "unit") => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) | TFfi (m, x) => (if Settings.mayClientToServer (m, x) (* TODO: better error message. (Then again, user should never see this.) *) - then () - else (E.errorAt loc "MonoFooify: can't pass type from client to server"; - Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty tAll)]); - ((EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm)) + then ((EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm) + else raise CantPass (fm, tAll)) | TRecord [] => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) | TRecord ((x, t) :: xts) => @@ -291,38 +292,50 @@ fun fooifyExp fk lookupENamed lookupDatatype = ((EApp ((ENamed n, loc), e), loc), fm) end - | _ => (E.errorAt loc "Don't know how to encode attribute/URL type"; - Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty tAll)]; - (dummyExp, fm)) + | _ => raise DontKnow (fm, tAll) in fooify end +fun fooifyExp fk lookupENamed lookupDatatype fm exp = + fooifyExpWithExceptions fk lookupENamed lookupDatatype fm exp + handle TypeMismatch (fm, loc) => + (E.errorAt loc "Type mismatch encoding attribute"; + (dummyExp, fm)) + | CantPass (fm, typ as (_, loc)) => + (E.errorAt loc "MonoFooify: can't pass type from client to server"; + Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty typ)]; + (dummyExp, fm)) + | DontKnow (fm, typ as (_, loc)) => + (E.errorAt loc "Don't know how to encode attribute/URL type"; + Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty typ)]; + (dummyExp, fm)) + + (* Has to be set at the end of [Monoize]. 
*) val canonicalFm = ref (Fm.empty 0 : Fm.t) fun urlify env expTyp = - if ErrorMsg.anyErrors () - then ((* DEBUG *) print "already error"; NONE) - else - let - val (exp, fm) = - fooifyExp - Url - (fn n => - let - val (_, t, _, s) = MonoEnv.lookupENamed env n - in - (t, s) - end) - (fn n => MonoEnv.lookupDatatype env n) - (!canonicalFm) - expTyp - in - if ErrorMsg.anyErrors () - then ((* DEBUG *) print "why"; (ErrorMsg.resetErrors (); NONE)) - else (canonicalFm := fm; SOME exp) - end + let + val (exp, fm) = + fooifyExpWithExceptions + Url + (fn n => + let + val (_, t, _, s) = MonoEnv.lookupENamed env n + in + (t, s) + end) + (fn n => MonoEnv.lookupDatatype env n) + (!canonicalFm) + expTyp + in + canonicalFm := fm; + SOME exp + end + handle TypeMismatch _ => NONE + | CantPass _ => NONE + | DontKnow _ => NONE fun getNewFmDecls () = let diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 4d4c7d36..dd851787 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -53,8 +53,9 @@ fun getCache () = !cache (* Used to have type context for local variables in MonoUtil functions. *) val doBind = - fn (env, MonoUtil.Exp.RelE (s, t)) => MonoEnv.pushERel env s t NONE - | (env, _) => env + fn (env, MonoUtil.Exp.RelE (x, t)) => MonoEnv.pushERel env x t NONE + | (env, MonoUtil.Exp.NamedE (x, n, t, eo, s)) => MonoEnv.pushENamed env x n t eo s + | (env, MonoUtil.Exp.Datatype (x, n, cs)) => MonoEnv.pushDatatype env x n cs (*******************) @@ -499,8 +500,6 @@ fun cacheWrap (env, exp, resultTyp, args, i) = let val loc = dummyLoc val rel0 = (ERel 0, loc) - (* DEBUG *) - val () = print (Int.toString i ^ "\n") in case MonoFooify.urlify env (rel0, resultTyp) of NONE => NONE @@ -524,7 +523,42 @@ fun cacheWrap (env, exp, resultTyp, args, i) = end end -fun fileMapfoldB doExp file start = +fun fileTopLevelMapfoldB doTopLevelExp (decls, sideInfo) state = + let + fun doVal env ((x, n, t, exp, s), state) = + let + val (exp, state) = doTopLevelExp env exp state + in + ((x, n, t, exp, s), state) + end + fun doDecl' env (decl', state) = + case decl' of + DVal v => + let + val (v, state) = doVal env (v, state) + in + (DVal v, state) + end + | DValRec vs => + let + val (vs, state) = ListUtil.foldlMap (doVal env) state vs + in + (DValRec vs, state) + end + | _ => (decl', state) + fun doDecl (decl as (decl', loc), (env, state)) = + let + val env = MonoEnv.declBinds env decl + val (decl', state) = doDecl' env (decl', state) + in + ((decl', loc), (env, state)) + end + val (decls, (_, state)) = (ListUtil.foldlMap doDecl (MonoEnv.empty, state) decls) + in + ((decls, sideInfo), state) + end + +fun fileAllMapfoldB doExp file start = case MonoUtil.File.mapfoldB {typ = Search.return2, exp = fn env => fn e' => fn s => Search.Continue (doExp env e' s), @@ -534,7 +568,7 @@ fun fileMapfoldB doExp file start = Search.Continue x => x | Search.Return _ => raise Match -fun fileMap doExp file = #1 (fileMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) +fun fileMap doExp file = #1 (fileAllMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) fun factorOutNontrivial text = let @@ -623,7 +657,7 @@ fun addChecking file = end | e' => (e', queryInfo) in - (fileMapfoldB (fn env => fn exp => fn state => doExp env state exp) + (fileAllMapfoldB (fn env => fn exp => fn state => doExp env state exp) file (SIMM.empty, IM.empty, 0), effs) @@ -675,8 +709,8 @@ end val invalidations = Invalidations.invalidations (* DEBUG *) -val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] -val gunk' : exp list ref = ref [] +(* val gunk : ((Sql.query 
* int) * Sql.dml) list ref = ref [] *) +(* val gunk' : exp list ref = ref [] *) fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = let @@ -686,19 +720,19 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = fn EDml (origDmlText, failureMode) => let (* DEBUG *) - val () = gunk' := origDmlText :: !gunk' + (* val () = gunk' := origDmlText :: !gunk' *) val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText val dmlText = incRels numArgs newDmlText val dmlExp = EDml (dmlText, failureMode) (* DEBUG *) - val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) + (* val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) val inval = case Sql.parse Sql.dml dmlText of SOME dmlParsed => SOME (map (fn i => (case IM.find (indexToQueryNumArgs, i) of SOME queryNumArgs => (* DEBUG *) - (gunk := (queryNumArgs, dmlParsed) :: !gunk; + ((* gunk := (queryNumArgs, dmlParsed) :: !gunk; *) (i, invalidations (queryNumArgs, dmlParsed))) (* TODO: fail more gracefully. *) | NONE => raise Match)) @@ -713,7 +747,7 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = | e' => e' in (* DEBUG *) - gunk := []; + (* gunk := []; *) (fileMap doExp file, index, effs) end @@ -957,52 +991,37 @@ fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int index + 1) end -fun addPure ((decls, sideInfo), indexStart, effs) = +fun addPure (file, indexStart, effs) = let - fun doVal env ((x, n, t, exp, s), index) = + fun doTopLevelExp env exp index = let val (subexp, index) = pureCache effs ((env, exp), index) in - ((x, n, t, expOfSubexp subexp, s), index) - end - fun doDecl' env (decl', index) = - case decl' of - DVal v => - let - val (v, index) = doVal env (v, index) - in - (DVal v, index) - end - | DValRec vs => - let - val (vs, index) = ListUtil.foldlMap (doVal env) index vs - in - (DValRec vs, index) - end - | _ => (decl', index) - fun doDecl (decl as (decl', loc), (revDecls, env, index)) = - let - val env = MonoEnv.declBinds env decl - val (decl', index) = doDecl' env (decl', index) - (* Important that this happens after [MonoFooify.urlify] calls! *) - val fmDecls = MonoFooify.getNewFmDecls () - in - ((decl', loc) :: (fmDecls @ revDecls), env, index) + (expOfSubexp subexp, index) end in - (rev (#1 (List.foldl doDecl ([], MonoEnv.empty, indexStart) decls)), sideInfo) + #1 (fileTopLevelMapfoldB doTopLevelExp file indexStart) + end + +fun insertAfterDatatypes ((decls, sideInfo), newDecls) = + let + val (datatypes, others) = List.partition (fn (DDatatype _, _) => true | _ => false) decls + in + (datatypes @ newDecls @ others, sideInfo) end -val go' = addPure o addFlushing o addChecking (* DEBUG: add back [o inlineSql]. *) +val go' = addPure o addFlushing o addChecking o inlineSql fun go file = let (* TODO: do something nicer than [Sql] being in one of two modes. *) val () = (resetFfiInfo (); Sql.sqlcacheMode := true) - val file' = go' file + val file = go' file + (* Important that this happens after [MonoFooify.urlify] calls! 
*) + val fmDecls = MonoFooify.getNewFmDecls () val () = Sql.sqlcacheMode := false in - file' + insertAfterDatatypes (file, rev fmDecls) end end diff --git a/src/toy_cache.sml b/src/toy_cache.sml index 34a7a26f..cfde027b 100644 --- a/src/toy_cache.sml +++ b/src/toy_cache.sml @@ -95,7 +95,7 @@ fun setupQuery {index, params} = string args, string ") {", newline, - string "if (cacheQuery", + string "if (cacheWrite", string i, (* ASK: is returning the pointer okay? Should we duplicate? *) string " == NULL", @@ -116,9 +116,11 @@ fun setupQuery {index, params} = string i, string ".\");", newline, - string "uw_write(ctx, cacheWrite", + string " if (cacheWrite", string i, - string ");", + string " != NULL) { uw_write(ctx, cacheWrite", + string i, + string "); }", newline, string "return cacheQuery", string i, @@ -176,6 +178,14 @@ fun setupQuery {index, params} = string i, string " = NULL;", newline, + string "free(cacheWrite", + string i, + string ");", + newline, + string "cacheWrite", + string i, + string " = NULL;", + newline, string "puts(\"SQLCACHE: flush ", string i, string ".\");}", -- cgit v1.2.3 From 51117ba9333e00cdd8c4c31307effbe93601d328 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 13 Oct 2015 20:24:37 -0400 Subject: Fix another mismatch between expunger SQL generation and SQL parser. --- src/monoize.sml | 23 ++++++------ src/sqlcache.sml | 108 ++++++++++++++++++++++++++++--------------------------- 2 files changed, 68 insertions(+), 63 deletions(-) (limited to 'src') diff --git a/src/monoize.sml b/src/monoize.sml index 2e87a70b..bdd8f5c3 100644 --- a/src/monoize.sml +++ b/src/monoize.sml @@ -4371,16 +4371,19 @@ fun monoize env file = [] => e | eb :: ebs => (L'.ESeq ( - (L'.EDml (foldl - (fn (eb, s) => - (L'.EStrcat (s, - (L'.EStrcat (str " OR ", - cond eb), loc)), loc)) - (L'.EStrcat (str ("DELETE FROM " - ^ Settings.mangleSql tab - ^ " WHERE "), - cond eb), loc) - ebs, L'.Error), loc), + (L'.EDml ((L'.EStrcat (str ("DELETE FROM " + ^ Settings.mangleSql tab + ^ " WHERE "), + foldl (fn (eb, s) => + (L'.EStrcat (str "(", + (L'.EStrcat (s, + (L'.EStrcat (str " OR ", + (L'.EStrcat (cond eb, + str ")"), + loc)), loc)), loc)), loc)) + (cond eb) + ebs), loc), + L'.Error), loc), e), loc) in e diff --git a/src/sqlcache.sml b/src/sqlcache.sml index dd851787..f3db5795 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -604,62 +604,64 @@ fun factorOutNontrivial text = (newText, wrapLets, numArgs) end +fun cacheQuery effs env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = + fn e' as EQuery {query = origQueryText, + state = resultTyp, + initial, body, tables, exps} => + let + val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText + (* Increment once for each new variable just made. *) + val queryExp = incRels numArgs + (EQuery {query = newQueryText, + state = resultTyp, + initial = initial, + body = body, + tables = tables, + exps = exps}, + dummyLoc) + (* DEBUG *) + (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) + val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) + fun bind x f = Option.mapPartial f x + fun guard b x = if b then x else NONE + (* We use dummyTyp here. I think this is okay because databases don't + store (effectful) functions, but perhaps there's some pathalogical + corner case missing.... 
*) + fun safe bound = + not + o effectful effs + (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) + bound + env) + val textOfQuery = fn (EQuery {query, ...}, _) => SOME query | _ => NONE + val attempt = + (* Ziv misses Haskell's do notation.... *) + bind (textOfQuery queryExp) (fn queryText => + guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( + bind (Sql.parse Sql.query queryText) (fn queryParsed => + bind (cacheWrap (env, queryExp, resultTyp, args, index)) (fn cachedExp => + SOME (wrapLets cachedExp, + (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) + tableToIndices + (tablesQuery queryParsed), + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), + index + 1)))))) + in + case attempt of + SOME pair => pair + (* We have to increment index conservatively. *) + (* TODO: just use a reference for current index.... *) + | NONE => (e', (tableToIndices, indexToQueryNumArgs, index + 1)) + end + | e' => (e', queryInfo) + fun addChecking file = let val effs = effectfulDecls file - fun doExp env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = - fn e' as EQuery {query = origQueryText, - state = resultTyp, - initial, body, tables, exps} => - let - val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText - (* Increment once for each new variable just made. *) - val queryExp = incRels numArgs - (EQuery {query = newQueryText, - state = resultTyp, - initial = initial, - body = body, - tables = tables, - exps = exps}, - dummyLoc) - val (EQuery {query = queryText, ...}, _) = queryExp - (* DEBUG *) - (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) - val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) - fun bind x f = Option.mapPartial f x - fun guard b x = if b then x else NONE - (* We use dummyTyp here. I think this is okay because databases - don't store (effectful) functions, but perhaps there's some - pathalogical corner case missing.... *) - fun safe bound = - not - o effectful effs - (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) - bound - env) - val attempt = - (* Ziv misses Haskell's do notation.... *) - guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( - bind (Sql.parse Sql.query queryText) (fn queryParsed => - bind (cacheWrap (env, queryExp, resultTyp, args, index)) (fn cachedExp => - SOME (wrapLets cachedExp, - (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) - tableToIndices - (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - index + 1))))) - in - case attempt of - SOME pair => pair - (* We have to increment index conservatively. *) - (* TODO: just use a reference for current index.... 
*) - | NONE => (e', (tableToIndices, indexToQueryNumArgs, index + 1)) - end - | e' => (e', queryInfo) in - (fileAllMapfoldB (fn env => fn exp => fn state => doExp env state exp) - file - (SIMM.empty, IM.empty, 0), + (fileAllMapfoldB (fn env => fn exp => fn state => cacheQuery effs env state exp) + file + (SIMM.empty, IM.empty, 0), effs) end @@ -725,7 +727,7 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = val dmlText = incRels numArgs newDmlText val dmlExp = EDml (dmlText, failureMode) (* DEBUG *) - (* val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) + val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) val inval = case Sql.parse Sql.dml dmlText of SOME dmlParsed => -- cgit v1.2.3 From e86ed0717e35bea1ad6127d193e5979aec4841b9 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 14 Oct 2015 00:07:00 -0400 Subject: Hard-code Sqlcache module (in Ur/Web) as effectful and reorder sqlcache.sml. --- src/lru_cache.sml | 8 +- src/mono_fooify.sml | 2 - src/settings.sml | 3 +- src/sqlcache.sml | 478 +++++++++++++++++++++++++++------------------------- src/toy_cache.sml | 8 +- 5 files changed, 250 insertions(+), 249 deletions(-) (limited to 'src') diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 275c3061..e69624d8 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -13,13 +13,7 @@ val optionStringTyp = (TOption stringTyp, dummyLoc) fun withTyp typ = map (fn exp => (exp, typ)) fun ffiAppCache' (func, index, argTyps) = - let - val m = "Sqlcache" - val f = func ^ Int.toString index - in - Settings.addEffectful (m, f); - EFfiApp (m, f, argTyps) - end + EFfiApp ("Sqlcache", func ^ Int.toString index, argTyps) fun check (index, keys) = ffiAppCache' ("check", index, withTyp stringTyp keys) diff --git a/src/mono_fooify.sml b/src/mono_fooify.sml index bbd34b15..e64207cd 100644 --- a/src/mono_fooify.sml +++ b/src/mono_fooify.sml @@ -167,7 +167,6 @@ fun fooifyExpWithExceptions fk lookupENamed lookupDatatype = case t of TFfi ("Basis", "unit") => ((EPrim (Prim.String (Prim.Normal, "")), loc), fm) | TFfi (m, x) => (if Settings.mayClientToServer (m, x) - (* TODO: better error message. (Then again, user should never see this.) *) then ((EFfiApp (m, fk2s fk ^ "ify" ^ capitalize x, [(e, tAll)]), loc), fm) else raise CantPass (fm, tAll)) @@ -311,7 +310,6 @@ fun fooifyExp fk lookupENamed lookupDatatype fm exp = Print.eprefaces' [("Type", MonoPrint.p_typ MonoEnv.empty typ)]; (dummyExp, fm)) - (* Has to be set at the end of [Monoize]. *) val canonicalFm = ref (Fm.empty 0 : Fm.t) diff --git a/src/settings.sml b/src/settings.sml index ff99bf13..ecf353cd 100644 --- a/src/settings.sml +++ b/src/settings.sml @@ -151,7 +151,8 @@ val effectfulBase = basis ["dml", val effectful = ref effectfulBase fun setEffectful ls = effectful := S.addList (effectfulBase, ls) -fun isEffectful x = S.member (!effectful, x) +fun isEffectful ("Sqlcache", _) = true + | isEffectful x = S.member (!effectful, x) fun addEffectful x = effectful := S.add (!effectful, x) val benignBase = basis ["get_cookie", diff --git a/src/sqlcache.sml b/src/sqlcache.sml index f3db5795..1a4d4e97 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -15,7 +15,7 @@ fun iterate f n x = if n < 0 then x else iterate f (n-1) (f x) -(* Filled in by [cacheWrap] during [Sqlcache]. *) +(* Filled in by [cacheWrap]. 
*) val ffiInfo : {index : int, params : int} list ref = ref [] fun resetFfiInfo () = ffiInfo := [] @@ -41,8 +41,7 @@ val ffiEffectful = "urlifyBool_w", "urlifyChannel_w"] in - (* ASK: nicer way than using [Settings.addEffectful] for each Sqlcache - function? Right now they're all always effectful. *) + (* ASK: is it okay to hardcode Sqlcache functions as effectful? *) fn (m, f) => Settings.isEffectful (m, f) andalso not (m = "Basis" andalso SS.member (okayWrites, f)) end @@ -456,9 +455,9 @@ val tableDml = | Sql.Update (tab, _, _) => tab -(***************************) -(* Program Instrumentation *) -(***************************) +(*************************************) +(* Program Instrumentation Utilities *) +(*************************************) val varName = let @@ -496,33 +495,6 @@ fun incRels inc = bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} 0 -fun cacheWrap (env, exp, resultTyp, args, i) = - let - val loc = dummyLoc - val rel0 = (ERel 0, loc) - in - case MonoFooify.urlify env (rel0, resultTyp) of - NONE => NONE - | SOME urlified => - let - val () = ffiInfo := {index = i, params = length args} :: !ffiInfo - (* We ensure before this step that all arguments aren't effectful. - by turning them into local variables as needed. *) - val argsInc = map (incRels 1) args - val check = (check (i, args), loc) - val store = (store (i, argsInc, urlified), loc) - in - SOME (ECase - (check, - [((PNone stringTyp, loc), - (ELet (varName "q", resultTyp, exp, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), - (* Boolean is false because we're not unurlifying from a cookie. *) - (EUnurlify (rel0, resultTyp, false), loc))], - {disc = (TOption stringTyp, loc), result = resultTyp})) - end - end - fun fileTopLevelMapfoldB doTopLevelExp (decls, sideInfo) state = let fun doVal env ((x, n, t, exp, s), state) = @@ -570,205 +542,6 @@ fun fileAllMapfoldB doExp file start = fun fileMap doExp file = #1 (fileAllMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) -fun factorOutNontrivial text = - let - val loc = dummyLoc - fun strcat (e1, e2) = (EStrcat (e1, e2), loc) - val chunks = Sql.chunkify text - val (newText, newVariables) = - (* Important that this is foldr (to oppose foldl below). *) - List.foldr - (fn (chunk, (qText, newVars)) => - (* Variable bound to the head of newBs will have the lowest index. *) - case chunk of - Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) - | Sql.Exp e => - let - val n = length newVars - in - (* This is the (n+1)th new variable, so there are - already n new variables bound, so we increment - indices by n. *) - (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) - end - | Sql.String s => (strcat (stringExp s, qText), newVars)) - (stringExp "", []) - chunks - fun wrapLets e' = - (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) - e' - newVariables - val numArgs = length newVariables - in - (newText, wrapLets, numArgs) - end - -fun cacheQuery effs env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = - fn e' as EQuery {query = origQueryText, - state = resultTyp, - initial, body, tables, exps} => - let - val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText - (* Increment once for each new variable just made. 
*) - val queryExp = incRels numArgs - (EQuery {query = newQueryText, - state = resultTyp, - initial = initial, - body = body, - tables = tables, - exps = exps}, - dummyLoc) - (* DEBUG *) - (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) - val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) - fun bind x f = Option.mapPartial f x - fun guard b x = if b then x else NONE - (* We use dummyTyp here. I think this is okay because databases don't - store (effectful) functions, but perhaps there's some pathalogical - corner case missing.... *) - fun safe bound = - not - o effectful effs - (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) - bound - env) - val textOfQuery = fn (EQuery {query, ...}, _) => SOME query | _ => NONE - val attempt = - (* Ziv misses Haskell's do notation.... *) - bind (textOfQuery queryExp) (fn queryText => - guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( - bind (Sql.parse Sql.query queryText) (fn queryParsed => - bind (cacheWrap (env, queryExp, resultTyp, args, index)) (fn cachedExp => - SOME (wrapLets cachedExp, - (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) - tableToIndices - (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - index + 1)))))) - in - case attempt of - SOME pair => pair - (* We have to increment index conservatively. *) - (* TODO: just use a reference for current index.... *) - | NONE => (e', (tableToIndices, indexToQueryNumArgs, index + 1)) - end - | e' => (e', queryInfo) - -fun addChecking file = - let - val effs = effectfulDecls file - in - (fileAllMapfoldB (fn env => fn exp => fn state => cacheQuery effs env state exp) - file - (SIMM.empty, IM.empty, 0), - effs) - end - -structure Invalidations = struct - - val loc = dummyLoc - - val optionAtomExpToExp = - fn NONE => (ENone stringTyp, loc) - | SOME e => (ESome (stringTyp, - (case e of - DmlRel n => ERel n - | Prim p => EPrim p - (* TODO: make new type containing only these two. *) - | _ => raise Match, - loc)), - loc) - - fun eqsToInvalidation numArgs eqs = - let - fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) - in - inv (numArgs - 1) - end - - (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here - represents unknown, which means a wider invalidation. 
*) - val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = - fn ([], []) => true - | (_ :: xs, NONE :: ys) => madeRedundantBy (xs, ys) - | (SOME x :: xs, SOME y :: ys) => (case AtomExpKey.compare (x, y) of - EQUAL => madeRedundantBy (xs, ys) - | _ => false) - | _ => false - - fun eqss (query, dml) = conflictMaps (queryToFormula query, dmlToFormula dml) - - fun invalidations ((query, numArgs), dml) = - (map (map optionAtomExpToExp) - o removeRedundant madeRedundantBy - o map (eqsToInvalidation numArgs) - o eqss) - (query, dml) - -end - -val invalidations = Invalidations.invalidations - -(* DEBUG *) -(* val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] *) -(* val gunk' : exp list ref = ref [] *) - -fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = - let - val flushes = List.concat - o map (fn (i, argss) => map (fn args => flush (i, args)) argss) - val doExp = - fn EDml (origDmlText, failureMode) => - let - (* DEBUG *) - (* val () = gunk' := origDmlText :: !gunk' *) - val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText - val dmlText = incRels numArgs newDmlText - val dmlExp = EDml (dmlText, failureMode) - (* DEBUG *) - val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) - val inval = - case Sql.parse Sql.dml dmlText of - SOME dmlParsed => - SOME (map (fn i => (case IM.find (indexToQueryNumArgs, i) of - SOME queryNumArgs => - (* DEBUG *) - ((* gunk := (queryNumArgs, dmlParsed) :: !gunk; *) - (i, invalidations (queryNumArgs, dmlParsed))) - (* TODO: fail more gracefully. *) - | NONE => raise Match)) - (SIMM.findList (tableToIndices, tableDml dmlParsed))) - | NONE => NONE - in - case inval of - (* TODO: fail more gracefully. *) - NONE => raise Match - | SOME invs => wrapLets (sequence (flushes invs @ [dmlExp])) - end - | e' => e' - in - (* DEBUG *) - (* gunk := []; *) - (fileMap doExp file, index, effs) - end - -val inlineSql = - let - val doExp = - (* TODO: EQuery, too? *) - (* ASK: should this live in [MonoOpt]? *) - fn EDml ((ECase (disc, cases, {disc = dTyp, ...}), loc), failureMode) => - let - val newCases = map (fn (p, e) => (p, (EDml (e, failureMode), loc))) cases - in - ECase (disc, newCases, {disc = dTyp, result = (TRecord [], loc)}) - end - | e => e - in - fileMap doExp - end - (**********************) (* Mono Type Checking *) @@ -830,6 +603,33 @@ and typOfExp env (e', loc) = typOfExp' env e' (* Caching Pure Subexpressions *) (*******************************) +fun cacheWrap (env, exp, resultTyp, args, i) = + let + val loc = dummyLoc + val rel0 = (ERel 0, loc) + in + case MonoFooify.urlify env (rel0, resultTyp) of + NONE => NONE + | SOME urlified => + let + val () = ffiInfo := {index = i, params = length args} :: !ffiInfo + (* We ensure before this step that all arguments aren't effectful. + by turning them into local variables as needed. *) + val argsInc = map (incRels 1) args + val check = (check (i, args), loc) + val store = (store (i, argsInc, urlified), loc) + in + SOME (ECase + (check, + [((PNone stringTyp, loc), + (ELet (varName "q", resultTyp, exp, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), + (* Boolean is false because we're not unurlifying from a cookie. 
*) + (EUnurlify (rel0, resultTyp, false), loc))], + {disc = (TOption stringTyp, loc), result = resultTyp})) + end + end + val freeVars = IS.listItems o MonoUtil.Exp.foldB @@ -1005,6 +805,220 @@ fun addPure (file, indexStart, effs) = #1 (fileTopLevelMapfoldB doTopLevelExp file indexStart) end + +(***********************) +(* Caching SQL Queries *) +(***********************) + +fun factorOutNontrivial text = + let + val loc = dummyLoc + fun strcat (e1, e2) = (EStrcat (e1, e2), loc) + val chunks = Sql.chunkify text + val (newText, newVariables) = + (* Important that this is foldr (to oppose foldl below). *) + List.foldr + (fn (chunk, (qText, newVars)) => + (* Variable bound to the head of newBs will have the lowest index. *) + case chunk of + Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) + | Sql.Exp e => + let + val n = length newVars + in + (* This is the (n+1)th new variable, so there are + already n new variables bound, so we increment + indices by n. *) + (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) + end + | Sql.String s => (strcat (stringExp s, qText), newVars)) + (stringExp "", []) + chunks + fun wrapLets e' = + (* Important that this is foldl (to oppose foldr above). *) + List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) + e' + newVariables + val numArgs = length newVariables + in + (newText, wrapLets, numArgs) + end + +fun cacheQuery effs env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = + fn e' as EQuery {query = origQueryText, + state = resultTyp, + initial, body, tables, exps} => + let + val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText + (* Increment once for each new variable just made. *) + val queryExp = incRels numArgs + (EQuery {query = newQueryText, + state = resultTyp, + initial = initial, + body = body, + tables = tables, + exps = exps}, + dummyLoc) + (* DEBUG *) + (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) + val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) + fun bind x f = Option.mapPartial f x + fun guard b x = if b then x else NONE + (* We use dummyTyp here. I think this is okay because databases don't + store (effectful) functions, but perhaps there's some pathalogical + corner case missing.... *) + fun safe bound = + not + o effectful effs + (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) + bound + env) + val textOfQuery = fn (EQuery {query, ...}, _) => SOME query | _ => NONE + val attempt = + (* Ziv misses Haskell's do notation.... *) + bind (textOfQuery queryExp) (fn queryText => + guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( + bind (Sql.parse Sql.query queryText) (fn queryParsed => + bind (cacheWrap (env, queryExp, resultTyp, args, index)) (fn cachedExp => + SOME (wrapLets cachedExp, + (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) + tableToIndices + (tablesQuery queryParsed), + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), + index + 1)))))) + in + case attempt of + SOME pair => pair + (* We have to increment index conservatively. *) + (* TODO: just use a reference for current index.... 
*) + | NONE => (e', (tableToIndices, indexToQueryNumArgs, index + 1)) + end + | e' => (e', queryInfo) + +fun addChecking file = + let + val effs = effectfulDecls file + in + (fileAllMapfoldB (fn env => fn exp => fn state => cacheQuery effs env state exp) + file + (SIMM.empty, IM.empty, 0), + effs) + end + + +(************) +(* Flushing *) +(************) + +structure Invalidations = struct + + val loc = dummyLoc + + val optionAtomExpToExp = + fn NONE => (ENone stringTyp, loc) + | SOME e => (ESome (stringTyp, + (case e of + DmlRel n => ERel n + | Prim p => EPrim p + (* TODO: make new type containing only these two. *) + | _ => raise Match, + loc)), + loc) + + fun eqsToInvalidation numArgs eqs = + let + fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) + in + inv (numArgs - 1) + end + + (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here + represents unknown, which means a wider invalidation. *) + val rec madeRedundantBy : atomExp option list * atomExp option list -> bool = + fn ([], []) => true + | (_ :: xs, NONE :: ys) => madeRedundantBy (xs, ys) + | (SOME x :: xs, SOME y :: ys) => (case AtomExpKey.compare (x, y) of + EQUAL => madeRedundantBy (xs, ys) + | _ => false) + | _ => false + + fun eqss (query, dml) = conflictMaps (queryToFormula query, dmlToFormula dml) + + fun invalidations ((query, numArgs), dml) = + (map (map optionAtomExpToExp) + o removeRedundant madeRedundantBy + o map (eqsToInvalidation numArgs) + o eqss) + (query, dml) + +end + +val invalidations = Invalidations.invalidations + +(* DEBUG *) +(* val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] *) +(* val gunk' : exp list ref = ref [] *) + +fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = + let + val flushes = List.concat + o map (fn (i, argss) => map (fn args => flush (i, args)) argss) + val doExp = + fn EDml (origDmlText, failureMode) => + let + (* DEBUG *) + (* val () = gunk' := origDmlText :: !gunk' *) + val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText + val dmlText = incRels numArgs newDmlText + val dmlExp = EDml (dmlText, failureMode) + (* DEBUG *) + (* val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) + val inval = + case Sql.parse Sql.dml dmlText of + SOME dmlParsed => + SOME (map (fn i => (case IM.find (indexToQueryNumArgs, i) of + SOME queryNumArgs => + (* DEBUG *) + ((* gunk := (queryNumArgs, dmlParsed) :: !gunk; *) + (i, invalidations (queryNumArgs, dmlParsed))) + (* TODO: fail more gracefully. *) + | NONE => raise Match)) + (SIMM.findList (tableToIndices, tableDml dmlParsed))) + | NONE => NONE + in + case inval of + (* TODO: fail more gracefully. *) + NONE => raise Match + | SOME invs => wrapLets (sequence (flushes invs @ [dmlExp])) + end + | e' => e' + in + (* DEBUG *) + (* gunk := []; *) + (fileMap doExp file, index, effs) + end + + +(***************) +(* Entry point *) +(***************) + +val inlineSql = + let + val doExp = + (* TODO: EQuery, too? *) + (* ASK: should this live in [MonoOpt]? 
*) + fn EDml ((ECase (disc, cases, {disc = dTyp, ...}), loc), failureMode) => + let + val newCases = map (fn (p, e) => (p, (EDml (e, failureMode), loc))) cases + in + ECase (disc, newCases, {disc = dTyp, result = (TRecord [], loc)}) + end + | e => e + in + fileMap doExp + end + fun insertAfterDatatypes ((decls, sideInfo), newDecls) = let val (datatypes, others) = List.partition (fn (DDatatype _, _) => true | _ => false) decls diff --git a/src/toy_cache.sml b/src/toy_cache.sml index cfde027b..377cae01 100644 --- a/src/toy_cache.sml +++ b/src/toy_cache.sml @@ -13,13 +13,7 @@ val optionStringTyp = (TOption stringTyp, dummyLoc) fun withTyp typ = map (fn exp => (exp, typ)) fun ffiAppCache' (func, index, argTyps) = - let - val m = "Sqlcache" - val f = func ^ Int.toString index - in - Settings.addEffectful (m, f); - EFfiApp (m, f, argTyps) - end + EFfiApp ("Sqlcache", func ^ Int.toString index, argTyps) fun check (index, keys) = ffiAppCache' ("check", index, withTyp stringTyp keys) -- cgit v1.2.3 From 6911f45af2dbc2770667294f010a30820ea5360f Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 14 Oct 2015 15:45:04 -0400 Subject: Do SQL and pure caching in the same pass. --- src/sqlcache.sml | 343 ++++++++++++++++++++++++++----------------------------- 1 file changed, 159 insertions(+), 184 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 1a4d4e97..99c89ff7 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -56,6 +56,19 @@ val doBind = | (env, MonoUtil.Exp.NamedE (x, n, t, eo, s)) => MonoEnv.pushENamed env x n t eo s | (env, MonoUtil.Exp.Datatype (x, n, cs)) => MonoEnv.pushDatatype env x n cs +(***********************) +(* General Combinators *) +(***********************) + +(* From the MLton wiki. *) +infix 3 <\ fun x <\ f = fn y => f (x, y) (* Left section *) +infix 3 \> fun f \> y = f y (* Left application *) +infixr 3 /> fun f /> y = fn x => f (x, y) (* Right section *) +infixr 3 fn e => fn _ => (doExp e, ())) file ()) +(* Takes a text expression and returns + newText: a new expression with any subexpressions that do computation + replaced with variables, + wrapLets: a function that wraps its argument expression with lets binding + those variables to their corresponding computations, and + numArgs: the number of such bindings. + The De Bruijn indices work out for [wrapLets (incRels numArgs newText)], but + the intention is that newText might be augmented. *) +fun factorOutNontrivial text = + let + val loc = dummyLoc + fun strcat (e1, e2) = (EStrcat (e1, e2), loc) + val chunks = Sql.chunkify text + val (newText, newVariables) = + (* Important that this is foldr (to oppose foldl below). *) + List.foldr + (fn (chunk, (qText, newVars)) => + (* Variable bound to the head of newVars will have the lowest index. *) + case chunk of + (* EPrim should always be a string in this case. *) + Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) + | Sql.Exp e => + let + val n = length newVars + in + (* This is the (n+1)th new variable, so there are + already n new variables bound, so we increment + indices by n. *) + (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) + end + | Sql.String s => (strcat (stringExp s, qText), newVars)) + (stringExp "", []) + chunks + fun wrapLets e' = + (* Important that this is foldl (to oppose foldr above). 
*) + List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) + e' + newVariables + val numArgs = length newVariables + in + (newText, wrapLets, numArgs) + end + (**********************) (* Mono Type Checking *) @@ -599,9 +655,9 @@ fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = and typOfExp env (e', loc) = typOfExp' env e' -(*******************************) -(* Caching Pure Subexpressions *) -(*******************************) +(***********) +(* Caching *) +(***********) fun cacheWrap (env, exp, resultTyp, args, i) = let @@ -644,57 +700,6 @@ val freeVars = val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 -structure InvalidationInfo :> sig - type t - val empty : t - val fromList : int list -> t - val toList : t -> int list - val union : t * t -> t - val unbind : t * int -> t option -end = struct - -(* Keep track of the minimum explicitly. NONE is the empty set. *) -type t = (int * IS.set) option - -val fromList = - List.foldl - (fn (n, NONE) => SOME (n, IS.singleton n) - | (n', SOME (n, ns)) => SOME (Int.min (n, n'), IS.add (ns, n'))) - NONE - -val empty = fromList [] - -val toList = - fn NONE => [] - | SOME (_, ns) => IS.listItems ns - -val union = - fn (SOME (n1, ns1), SOME (n2, ns2)) => SOME (Int.min (n1, n2), IS.union (ns1, ns2)) - | (NONE, info) => info - | (info, NONE) => info - -val unbind = - fn (SOME (n, ns), unbound) => - let - val n = n - unbound - in - if n < 0 - then NONE - else SOME (SOME (n, IS.map (fn n => n - unbound) ns)) - end - | _ => SOME NONE - -end - -val unionUnbind = - List.foldl - (fn (_, NONE) => NONE - | ((info, unbound), SOME infoAcc) => - case InvalidationInfo.unbind (info, unbound) of - NONE => NONE - | SOME info => SOME (InvalidationInfo.union (info, infoAcc))) - (SOME InvalidationInfo.empty) - datatype subexp = Pure of unit -> exp | Impure of exp val isImpure = @@ -708,38 +713,101 @@ val expOfSubexp = (* TODO: pick a number. *) val sizeWorthCaching = 5 -fun makeCache (env, exp', index) = +type state = (SIMM.multimap * (Sql.query * int) IntBinaryMap.map * int) + +fun incIndex (x, y, index) = (x, y, index+1) + +fun cacheQuery effs env (state as (tableToIndices, indexToQueryNumArgs, index)) = + fn q as {query = origQueryText, + state = resultTyp, + initial, body, tables, exps} => + let + val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText + (* Increment once for each new variable just made. This is where we + use the negative De Bruijn indices hack. *) + (* TODO: please don't use that hack. As anyone could have predicted, it + was incomprehensible a year later.... *) + val queryExp = incRels numArgs + (EQuery {query = newQueryText, + state = resultTyp, + initial = initial, + body = body, + tables = tables, + exps = exps}, + dummyLoc) + (* DEBUG *) + (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) + val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) + (* We use dummyTyp here. I think this is okay because databases don't + store (effectful) functions, but perhaps there's some pathalogical + corner case missing.... *) + fun safe bound = + not + o effectful effs + (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) + bound + env) + val textOfQuery = fn (EQuery {query, ...}, _) => SOME query | _ => NONE + val attempt = + (* Ziv misses Haskell's do notation.... 
*) + textOfQuery queryExp + <\obind\> + (fn queryText => + (safe 0 queryText andalso safe 0 initial andalso safe 2 body) + <\oguard\> + Sql.parse Sql.query queryText + <\obind\> + (fn queryParsed => + (cacheWrap (env, queryExp, resultTyp, args, index)) + <\obind\> + (fn cachedExp => + SOME (wrapLets cachedExp, + (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) + tableToIndices + (tablesQuery queryParsed), + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), + index + 1))))) + in + case attempt of + SOME pair => pair + (* Even in this case, we have to increment index to avoid some bug, + but I forget exactly what it is or why this helps. *) + (* TODO: just use a reference for current index.... *) + | NONE => (EQuery q, incIndex state) + end + +fun cachePure (env, exp', (_, _, index)) = case typOfExp' env exp' of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - if expSize (exp', dummyLoc) < sizeWorthCaching - then NONE - else case List.foldr (fn ((_, _), NONE) => NONE - | ((n, typ), SOME args) => - case MonoFooify.urlify env ((ERel n, dummyLoc), typ) of - NONE => NONE - | SOME arg => SOME (arg :: args)) - (SOME []) - (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) - (freeVars (exp', dummyLoc))) of - NONE => NONE - | SOME args => cacheWrap (env, (exp', dummyLoc), typ, args, index) - -fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int = + (expSize (exp', dummyLoc) < sizeWorthCaching) + + (List.foldr (fn (_, NONE) => NONE + | ((n, typ), SOME args) => + (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) + + (fn arg => SOME (arg :: args))) + (SOME []) + (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) + (freeVars (exp', dummyLoc)))) + + (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, index)) + +fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) : subexp * state = let fun wrapBindN f (args : (MonoEnv.env * exp) list) = let - val (subexps, index) = ListUtil.foldlMap (pureCache effs) index args + val (subexps, state) = ListUtil.foldlMap (cache effs) state args fun mkExp () = (f (map expOfSubexp subexps), loc) in if List.exists isImpure subexps - then (Impure (mkExp ()), index) - else (Pure (fn () => case makeCache (env, f (map #2 args), index) of + then (Impure (mkExp ()), state) + else (Pure (fn () => case cachePure (env, f (map #2 args), state) of NONE => mkExp () | SOME e' => (e', loc)), (* Conservatively increment index. *) - index + 1) + incIndex state) end fun wrapBind1 f arg = wrapBindN (fn [arg] => f arg | _ => raise Match) [arg] @@ -754,7 +822,7 @@ fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int | ESome (t, e) => wrap1 (fn e => ESome (t, e)) e | EFfiApp (s1, s2, args) => if ffiEffectful (s1, s2) - then (Impure exp, index) + then (Impure exp, state) else wrapN (fn es => EFfiApp (s1, s2, ListPair.map (fn (e, (_, t)) => (e, t)) (es, args))) (map #1 args) @@ -784,125 +852,32 @@ fun pureCache (effs : IS.set) ((env, exp as (exp', loc)), index) : subexp * int ((env, e1), (MonoEnv.pushERel env s t (SOME e1), e2)) (* ASK: | EClosure (n, es) => ? 
*) | EUnurlify (e, t, b) => wrap1 (fn e => EUnurlify (e, t, b)) e + | EQuery q => + let + val (exp', state) = cacheQuery effs env state q + in + (Impure (exp', loc), state) + end | _ => if effectful effs env exp - then (Impure exp, index) - else (Pure (fn () => (case makeCache (env, exp', index) of + then (Impure exp, state) + else (Pure (fn () => (case cachePure (env, exp', state) of NONE => exp' | SOME e' => e', loc)), - index + 1) + incIndex state) end -fun addPure (file, indexStart, effs) = +fun addCaching file = let - fun doTopLevelExp env exp index = + val effs = effectfulDecls file + fun doTopLevelExp env exp state = let - val (subexp, index) = pureCache effs ((env, exp), index) + val (subexp, state) = cache effs ((env, exp), state) in - (expOfSubexp subexp, index) + (expOfSubexp subexp, state) end in - #1 (fileTopLevelMapfoldB doTopLevelExp file indexStart) - end - - -(***********************) -(* Caching SQL Queries *) -(***********************) - -fun factorOutNontrivial text = - let - val loc = dummyLoc - fun strcat (e1, e2) = (EStrcat (e1, e2), loc) - val chunks = Sql.chunkify text - val (newText, newVariables) = - (* Important that this is foldr (to oppose foldl below). *) - List.foldr - (fn (chunk, (qText, newVars)) => - (* Variable bound to the head of newBs will have the lowest index. *) - case chunk of - Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) - | Sql.Exp e => - let - val n = length newVars - in - (* This is the (n+1)th new variable, so there are - already n new variables bound, so we increment - indices by n. *) - (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) - end - | Sql.String s => (strcat (stringExp s, qText), newVars)) - (stringExp "", []) - chunks - fun wrapLets e' = - (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) - e' - newVariables - val numArgs = length newVariables - in - (newText, wrapLets, numArgs) - end - -fun cacheQuery effs env (queryInfo as (tableToIndices, indexToQueryNumArgs, index)) = - fn e' as EQuery {query = origQueryText, - state = resultTyp, - initial, body, tables, exps} => - let - val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText - (* Increment once for each new variable just made. *) - val queryExp = incRels numArgs - (EQuery {query = newQueryText, - state = resultTyp, - initial = initial, - body = body, - tables = tables, - exps = exps}, - dummyLoc) - (* DEBUG *) - (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) - val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) - fun bind x f = Option.mapPartial f x - fun guard b x = if b then x else NONE - (* We use dummyTyp here. I think this is okay because databases don't - store (effectful) functions, but perhaps there's some pathalogical - corner case missing.... *) - fun safe bound = - not - o effectful effs - (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) - bound - env) - val textOfQuery = fn (EQuery {query, ...}, _) => SOME query | _ => NONE - val attempt = - (* Ziv misses Haskell's do notation.... 
*) - bind (textOfQuery queryExp) (fn queryText => - guard (safe 0 queryText andalso safe 0 initial andalso safe 2 body) ( - bind (Sql.parse Sql.query queryText) (fn queryParsed => - bind (cacheWrap (env, queryExp, resultTyp, args, index)) (fn cachedExp => - SOME (wrapLets cachedExp, - (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) - tableToIndices - (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - index + 1)))))) - in - case attempt of - SOME pair => pair - (* We have to increment index conservatively. *) - (* TODO: just use a reference for current index.... *) - | NONE => (e', (tableToIndices, indexToQueryNumArgs, index + 1)) - end - | e' => (e', queryInfo) - -fun addChecking file = - let - val effs = effectfulDecls file - in - (fileAllMapfoldB (fn env => fn exp => fn state => cacheQuery effs env state exp) - file - (SIMM.empty, IM.empty, 0), - effs) + ((fileTopLevelMapfoldB doTopLevelExp file (SIMM.empty, IM.empty, 0)), effs) end @@ -995,7 +970,7 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = in (* DEBUG *) (* gunk := []; *) - (fileMap doExp file, index, effs) + fileMap doExp file end @@ -1026,7 +1001,7 @@ fun insertAfterDatatypes ((decls, sideInfo), newDecls) = (datatypes @ newDecls @ others, sideInfo) end -val go' = addPure o addFlushing o addChecking o inlineSql +val go' = addFlushing o addCaching o inlineSql fun go file = let -- cgit v1.2.3 From 0bcdd4b574807d2f7aea9231c0571770e7521561 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 14 Oct 2015 20:40:57 -0400 Subject: Factor out SQL simplification. --- src/sqlcache.sml | 227 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 128 insertions(+), 99 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 99c89ff7..fe8642d0 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -555,47 +555,71 @@ fun fileAllMapfoldB doExp file start = fun fileMap doExp file = #1 (fileAllMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) -(* Takes a text expression and returns - newText: a new expression with any subexpressions that do computation - replaced with variables, - wrapLets: a function that wraps its argument expression with lets binding - those variables to their corresponding computations, and - numArgs: the number of such bindings. - The De Bruijn indices work out for [wrapLets (incRels numArgs newText)], but - the intention is that newText might be augmented. *) -fun factorOutNontrivial text = +(* TODO: make this a bit prettier.... *) +val simplifySql = let - val loc = dummyLoc - fun strcat (e1, e2) = (EStrcat (e1, e2), loc) - val chunks = Sql.chunkify text - val (newText, newVariables) = - (* Important that this is foldr (to oppose foldl below). *) - List.foldr - (fn (chunk, (qText, newVars)) => - (* Variable bound to the head of newVars will have the lowest index. *) - case chunk of - (* EPrim should always be a string in this case. *) - Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) - | Sql.Exp e => - let - val n = length newVars - in - (* This is the (n+1)th new variable, so there are - already n new variables bound, so we increment - indices by n. *) - (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) - end - | Sql.String s => (strcat (stringExp s, qText), newVars)) - (stringExp "", []) - chunks - fun wrapLets e' = - (* Important that this is foldl (to oppose foldr above). 
*) - List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) - e' - newVariables - val numArgs = length newVariables + fun factorOutNontrivial text = + let + val loc = dummyLoc + fun strcat (e1, e2) = (EStrcat (e1, e2), loc) + val chunks = Sql.chunkify text + val (newText, newVariables) = + (* Important that this is foldr (to oppose foldl below). *) + List.foldr + (fn (chunk, (qText, newVars)) => + (* Variable bound to the head of newVars will have the lowest index. *) + case chunk of + (* EPrim should always be a string in this case. *) + Sql.Exp (e as (EPrim _, _)) => (strcat (e, qText), newVars) + | Sql.Exp e => + let + val n = length newVars + in + (* This is the (n+1)th new variable, so there are + already n new variables bound, so we increment + indices by n. *) + (strcat ((ERel (~(n+1)), loc), qText), incRels n e :: newVars) + end + | Sql.String s => (strcat (stringExp s, qText), newVars)) + (stringExp "", []) + chunks + fun wrapLets e' = + (* Important that this is foldl (to oppose foldr above). *) + List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) + e' + newVariables + val numArgs = length newVariables + in + (newText, wrapLets, numArgs) + end + fun doExp exp' = + let + val text = case exp' of + EQuery {query = text, ...} => text + | EDml (text, _) => text + | _ => raise Match + val (newText, wrapLets, numArgs) = factorOutNontrivial text + val newExp' = case exp' of + EQuery q => EQuery {query = newText, + exps = #exps q, + tables = #tables q, + state = #state q, + body = #body q, + initial = #initial q} + | EDml (_, failureMode) => EDml (newText, failureMode) + | _ => raise Match + in + (* Increment once for each new variable just made. This is + where we use the negative De Bruijn indices hack. *) + (* TODO: please don't use that hack. As anyone could have + predicted, it was incomprehensible a year later.... *) + wrapLets (#1 (incRels numArgs (newExp', dummyLoc))) + end in - (newText, wrapLets, numArgs) + fileMap (fn exp' => case exp' of + EQuery _ => doExp exp' + | EDml _ => doExp exp' + | _ => exp') end @@ -659,6 +683,22 @@ and typOfExp env (e', loc) = typOfExp' env e' (* Caching *) (***********) +(* + +To get the invalidations for a dml, we need (each <- is list-monad-y): + * table <- dml + * cache <- table + * query <- cache + * inval <- (query, dml), +where inval is a list of query argument indices, so + * way to change query args in inval to cache args. +For now, the last one is just + * a map from query arg number to the corresponding free variable (per query) + * a map from free variable to cache arg number (per cache). +Both queries and caches should have IDs. + +*) + fun cacheWrap (env, exp, resultTyp, args, i) = let val loc = dummyLoc @@ -686,6 +726,14 @@ fun cacheWrap (env, exp, resultTyp, args, i) = end end +val maxFreeVar = + MonoUtil.Exp.foldB + {typ = #2, + exp = fn (bound, ERel n, v) => Int.max (v, n - bound) | (_, _, v) => v, + bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} + 0 + ~1 + val freeVars = IS.listItems o MonoUtil.Exp.foldB @@ -700,14 +748,14 @@ val freeVars = val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 -datatype subexp = Pure of unit -> exp | Impure of exp +datatype subexp = Cachable of unit -> exp | Impure of exp val isImpure = - fn Pure _ => false + fn Cachable _ => false | Impure _ => true val expOfSubexp = - fn Pure f => f () + fn Cachable f => f () | Impure e => e (* TODO: pick a number. 
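The guess below exists because wrapping an expression costs a cache lookup, an urlification of the result, and a store on every miss, so very small computations are cheaper to redo than to cache. expSize counts one per subexpression node; an expression like x + 1 has only three nodes (the operator and its two operands), far under the threshold, while queries themselves are cached through cacheQuery and are not subject to this size test.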
*) @@ -718,23 +766,12 @@ type state = (SIMM.multimap * (Sql.query * int) IntBinaryMap.map * int) fun incIndex (x, y, index) = (x, y, index+1) fun cacheQuery effs env (state as (tableToIndices, indexToQueryNumArgs, index)) = - fn q as {query = origQueryText, - state = resultTyp, - initial, body, tables, exps} => + fn q as {query = queryText, + state = resultTyp, + initial, body, tables, exps} => let - val (newQueryText, wrapLets, numArgs) = factorOutNontrivial origQueryText - (* Increment once for each new variable just made. This is where we - use the negative De Bruijn indices hack. *) - (* TODO: please don't use that hack. As anyone could have predicted, it - was incomprehensible a year later.... *) - val queryExp = incRels numArgs - (EQuery {query = newQueryText, - state = resultTyp, - initial = initial, - body = body, - tables = tables, - exps = exps}, - dummyLoc) + val numArgs = maxFreeVar queryText + 1 + val queryExp = (EQuery q, dummyLoc) (* DEBUG *) (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) @@ -747,26 +784,22 @@ fun cacheQuery effs env (state as (tableToIndices, indexToQueryNumArgs, index)) (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) bound env) - val textOfQuery = fn (EQuery {query, ...}, _) => SOME query | _ => NONE val attempt = (* Ziv misses Haskell's do notation.... *) - textOfQuery queryExp + (safe 0 queryText andalso safe 0 initial andalso safe 2 body) + <\oguard\> + Sql.parse Sql.query queryText <\obind\> - (fn queryText => - (safe 0 queryText andalso safe 0 initial andalso safe 2 body) - <\oguard\> - Sql.parse Sql.query queryText + (fn queryParsed => + (cacheWrap (env, queryExp, resultTyp, args, index)) <\obind\> - (fn queryParsed => - (cacheWrap (env, queryExp, resultTyp, args, index)) - <\obind\> - (fn cachedExp => - SOME (wrapLets cachedExp, - (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) - tableToIndices - (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - index + 1))))) + (fn cachedExp => + SOME (cachedExp, + (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) + tableToIndices + (tablesQuery queryParsed), + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), + index + 1)))) in case attempt of SOME pair => pair @@ -777,20 +810,20 @@ fun cacheQuery effs env (state as (tableToIndices, indexToQueryNumArgs, index)) end fun cachePure (env, exp', (_, _, index)) = - case typOfExp' env exp' of + case (expSize (exp', dummyLoc) > sizeWorthCaching) + + typOfExp' env exp' of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - (expSize (exp', dummyLoc) < sizeWorthCaching) - - (List.foldr (fn (_, NONE) => NONE - | ((n, typ), SOME args) => - (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) - - (fn arg => SOME (arg :: args))) - (SOME []) - (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) - (freeVars (exp', dummyLoc)))) + (List.foldr (fn (_, NONE) => NONE + | ((n, typ), SOME args) => + (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) + + (fn arg => SOME (arg :: args))) + (SOME []) + (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) + (freeVars (exp', dummyLoc)))) (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, index)) @@ -803,9 +836,9 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) : subexp * state = in if List.exists isImpure subexps then (Impure (mkExp ()), state) - else (Pure (fn () => case cachePure (env, f (map #2 args), state) of - NONE => mkExp () - | SOME 
e' => (e', loc)), + else (Cachable (fn () => case cachePure (env, f (map #2 args), state) of + NONE => mkExp () + | SOME e' => (e', loc)), (* Conservatively increment index. *) incIndex state) end @@ -860,10 +893,10 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) : subexp * state = end | _ => if effectful effs env exp then (Impure exp, state) - else (Pure (fn () => (case cachePure (env, exp', state) of - NONE => exp' - | SOME e' => e', - loc)), + else (Cachable (fn () => (case cachePure (env, exp', state) of + NONE => exp' + | SOME e' => e', + loc)), incIndex state) end @@ -939,14 +972,10 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = val flushes = List.concat o map (fn (i, argss) => map (fn args => flush (i, args)) argss) val doExp = - fn EDml (origDmlText, failureMode) => + fn dmlExp as EDml (dmlText, failureMode) => let (* DEBUG *) (* val () = gunk' := origDmlText :: !gunk' *) - val (newDmlText, wrapLets, numArgs) = factorOutNontrivial origDmlText - val dmlText = incRels numArgs newDmlText - val dmlExp = EDml (dmlText, failureMode) - (* DEBUG *) (* val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) val inval = case Sql.parse Sql.dml dmlText of @@ -964,7 +993,7 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = case inval of (* TODO: fail more gracefully. *) NONE => raise Match - | SOME invs => wrapLets (sequence (flushes invs @ [dmlExp])) + | SOME invs => sequence (flushes invs @ [dmlExp]) end | e' => e' in @@ -1001,7 +1030,7 @@ fun insertAfterDatatypes ((decls, sideInfo), newDecls) = (datatypes @ newDecls @ others, sideInfo) end -val go' = addFlushing o addCaching o inlineSql +val go' = addFlushing o addCaching o simplifySql o inlineSql fun go file = let -- cgit v1.2.3 From f3ca4cbdd84e1d86f47d1cbabc8ad881e0adfeb2 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 14 Oct 2015 23:10:10 -0400 Subject: Thread state through addCaching more carefully. --- src/sqlcache.sml | 151 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 76 insertions(+), 75 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index fe8642d0..42bd724c 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -15,12 +15,12 @@ fun iterate f n x = if n < 0 then x else iterate f (n-1) (f x) -(* Filled in by [cacheWrap]. *) -val ffiInfo : {index : int, params : int} list ref = ref [] +(* Filled in by [addFlushing]. *) +val ffiInfoRef : {index : int, params : int} list ref = ref [] -fun resetFfiInfo () = ffiInfo := [] +fun resetFfiInfo () = ffiInfoRef := [] -fun getFfiInfo () = !ffiInfo +fun getFfiInfo () = !ffiInfoRef (* Some FFIs have writing as their only effect, which the caching records. *) val ffiEffectful = @@ -61,8 +61,6 @@ val doBind = (***********************) (* From the MLton wiki. *) -infix 3 <\ fun x <\ f = fn y => f (x, y) (* Left section *) -infix 3 \> fun f \> y = f y (* Left application *) infixr 3 /> fun f /> y = fn x => f (x, y) (* Right section *) infixr 3 NONE | SOME urlified => let - val () = ffiInfo := {index = i, params = length args} :: !ffiInfo (* We ensure before this step that all arguments aren't effectful. by turning them into local variables as needed. 
*) val argsInc = map (incRels 1) args - val check = (check (i, args), loc) - val store = (store (i, argsInc, urlified), loc) + val check = (check (index, args), loc) + val store = (store (index, argsInc, urlified), loc) in - SOME (ECase - (check, - [((PNone stringTyp, loc), - (ELet (varName "q", resultTyp, exp, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), - (* Boolean is false because we're not unurlifying from a cookie. *) - (EUnurlify (rel0, resultTyp, false), loc))], - {disc = (TOption stringTyp, loc), result = resultTyp})) + SOME ((ECase + (check, + [((PNone stringTyp, loc), + (ELet (varName "q", resultTyp, exp, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), + (* Boolean is false because we're not unurlifying from a cookie. *) + (EUnurlify (rel0, resultTyp, false), loc))], + {disc = (TOption stringTyp, loc), result = resultTyp})), + (#1 state, + #2 state, + {index = index, params = length args} :: ffiInfo, + index + 1)) end end @@ -748,28 +752,30 @@ val freeVars = val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 -datatype subexp = Cachable of unit -> exp | Impure of exp +type state = (SIMM.multimap + * (Sql.query * int) IntBinaryMap.map + * {index : int, params : int} list + * int) + +datatype subexp = Cachable of state -> (exp * state) | Impure of exp val isImpure = fn Cachable _ => false | Impure _ => true -val expOfSubexp = - fn Cachable f => f () - | Impure e => e +val runSubexp : subexp * state -> exp * state = + fn (Cachable f, state) => f state + | (Impure e, state) => (e, state) (* TODO: pick a number. *) val sizeWorthCaching = 5 -type state = (SIMM.multimap * (Sql.query * int) IntBinaryMap.map * int) - -fun incIndex (x, y, index) = (x, y, index+1) - -fun cacheQuery effs env (state as (tableToIndices, indexToQueryNumArgs, index)) = - fn q as {query = queryText, - state = resultTyp, - initial, body, tables, exps} => +fun cacheQuery (effs, env, state, q) : (exp' * state) = let + val (tableToIndices, indexToQueryNumArgs, ffiInfo, index) = state + val {query = queryText, + state = resultTyp, + initial, body, tables, exps} = q val numArgs = maxFreeVar queryText + 1 val queryExp = (EQuery q, dummyLoc) (* DEBUG *) @@ -787,29 +793,27 @@ fun cacheQuery effs env (state as (tableToIndices, indexToQueryNumArgs, index)) val attempt = (* Ziv misses Haskell's do notation.... *) (safe 0 queryText andalso safe 0 initial andalso safe 2 body) - <\oguard\> - Sql.parse Sql.query queryText - <\obind\> - (fn queryParsed => - (cacheWrap (env, queryExp, resultTyp, args, index)) - <\obind\> - (fn cachedExp => - SOME (cachedExp, - (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) - tableToIndices - (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - index + 1)))) + + Sql.parse Sql.query queryText + + (fn queryParsed => + (cacheWrap (env, queryExp, resultTyp, args, state)) + + (fn (cachedExp, state) => + SOME (cachedExp, + (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) + tableToIndices + (tablesQuery queryParsed), + IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), + #3 state, + #4 state)))) in case attempt of SOME pair => pair - (* Even in this case, we have to increment index to avoid some bug, - but I forget exactly what it is or why this helps. *) - (* TODO: just use a reference for current index.... 
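Spelled out, the ECase assembled just above corresponds to the following wrapper around the original computation, where check and store are the hooks bound from getCache () and i and args identify this particular cache (a sketch only; the generated code is Mono AST, not source):

    (* case check (i, args) of
         NONE     => let val q = <original expression>
                     in store (i, args, urlify q); q end
       | SOME hit => unurlify hit
       The false passed to EUnurlify records that the string comes from the
       cache rather than from a cookie. *)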
*) - | NONE => (EQuery q, incIndex state) + | NONE => (EQuery q, state) end -fun cachePure (env, exp', (_, _, index)) = +fun cachePure (env, exp', state as (_, _, _, index)) = case (expSize (exp', dummyLoc) > sizeWorthCaching) typOfExp' env exp' of @@ -825,22 +829,23 @@ fun cachePure (env, exp', (_, _, index)) = (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) (freeVars (exp', dummyLoc)))) - (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, index)) + (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, state)) -fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) : subexp * state = +fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = let - fun wrapBindN f (args : (MonoEnv.env * exp) list) = + fun wrapBindN (f : exp list -> exp') (args : (MonoEnv.env * exp) list) = let val (subexps, state) = ListUtil.foldlMap (cache effs) state args - fun mkExp () = (f (map expOfSubexp subexps), loc) + fun mkExp state = mapFst (fn exps => (f exps, loc)) + (ListUtil.foldlMap runSubexp state subexps) in if List.exists isImpure subexps - then (Impure (mkExp ()), state) - else (Cachable (fn () => case cachePure (env, f (map #2 args), state) of - NONE => mkExp () - | SOME e' => (e', loc)), - (* Conservatively increment index. *) - incIndex state) + then mapFst Impure (mkExp state) + else (Cachable (fn state => + case cachePure (env, f (map #2 args), state) of + NONE => mkExp state + | SOME (e', state) => ((e', loc), state)), + state) end fun wrapBind1 f arg = wrapBindN (fn [arg] => f arg | _ => raise Match) [arg] @@ -887,30 +892,25 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) : subexp * state = | EUnurlify (e, t, b) => wrap1 (fn e => EUnurlify (e, t, b)) e | EQuery q => let - val (exp', state) = cacheQuery effs env state q + val (exp', state) = cacheQuery (effs, env, state, q) in (Impure (exp', loc), state) end | _ => if effectful effs env exp then (Impure exp, state) - else (Cachable (fn () => (case cachePure (env, exp', state) of - NONE => exp' - | SOME e' => e', - loc)), - incIndex state) + else (Cachable (fn state => + case cachePure (env, exp', state) of + NONE => ((exp', loc), state) + | SOME (exp', state) => ((exp', loc), state)), + state) end fun addCaching file = let val effs = effectfulDecls file - fun doTopLevelExp env exp state = - let - val (subexp, state) = cache effs ((env, exp), state) - in - (expOfSubexp subexp, state) - end + fun doTopLevelExp env exp state = runSubexp (cache effs ((env, exp), state)) in - ((fileTopLevelMapfoldB doTopLevelExp file (SIMM.empty, IM.empty, 0)), effs) + ((fileTopLevelMapfoldB doTopLevelExp file (SIMM.empty, IM.empty, [], 0)), effs) end @@ -967,7 +967,7 @@ val invalidations = Invalidations.invalidations (* val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] *) (* val gunk' : exp list ref = ref [] *) -fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = +fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, ffiInfo, index)), effs) = let val flushes = List.concat o map (fn (i, argss) => map (fn args => flush (i, args)) argss) @@ -999,13 +999,14 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, index)), effs) = in (* DEBUG *) (* gunk := []; *) + ffiInfoRef := ffiInfo; fileMap doExp file end -(***************) -(* Entry point *) -(***************) +(************************) +(* Compiler Entry Point *) +(************************) val inlineSql = let -- cgit v1.2.3 From f425194d947691ceeaad9ec73fdc7c2c176ebfe3 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 15 
Oct 2015 00:52:04 -0400 Subject: Make SQL caches use more of the pure caching machinery, but it's brittle. --- caching-tests/test.ur | 11 ++++++++ caching-tests/test.urs | 1 + src/sqlcache.sml | 69 +++++++++++++++++++++++++------------------------- 3 files changed, 46 insertions(+), 35 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 578d59b3..00f05768 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -11,6 +11,17 @@ fun cache id = | Some row => {[row.Tab.Val]}} +fun cache2 id v = + res <- oneOrNoRows (SELECT tab.Val + FROM tab + WHERE tab.Id = {[id]} AND tab.Val = {[v]}); + return + Reading {[id]}. + {case res of + None => Nope, that's not it. + | Some _ => Hooray! You guessed it!} + + fun flush id = dml (UPDATE tab SET Val = Val * (Id + 2) / Val - 3 diff --git a/caching-tests/test.urs b/caching-tests/test.urs index e9e09ac8..fc23c47d 100644 --- a/caching-tests/test.urs +++ b/caching-tests/test.urs @@ -1,3 +1,4 @@ val cache : int -> transaction page +val cache2 : int -> int -> transaction page val flush : int -> transaction page val flush17 : transaction page diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 42bd724c..f98ff4bb 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -675,6 +675,7 @@ fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = | ELet (s, t, e1, e2) => typOfExp (MonoEnv.pushERel env s t (SOME e1)) e2 | EClosure _ => NONE | EUnurlify (_, t, _) => SOME t + | EQuery {state, ...} => SOME state | _ => NONE and typOfExp env (e', loc) = typOfExp' env e' @@ -770,17 +771,35 @@ val runSubexp : subexp * state -> exp * state = (* TODO: pick a number. *) val sizeWorthCaching = 5 +val worthCaching = + fn EQuery _ => true + | exp' => expSize (exp', dummyLoc) > sizeWorthCaching + +fun cachePure (env, exp', state as (_, _, _, index)) = + case (worthCaching exp') + + typOfExp' env exp' of + NONE => NONE + | SOME (TFun _, _) => NONE + | SOME typ => + (List.foldr (fn (_, NONE) => NONE + | ((n, typ), SOME args) => + (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) + + (fn arg => SOME (arg :: args))) + (SOME []) + (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) + (ListMergeSort.sort op> (freeVars (exp', dummyLoc))))) + + (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, state)) + fun cacheQuery (effs, env, state, q) : (exp' * state) = let val (tableToIndices, indexToQueryNumArgs, ffiInfo, index) = state - val {query = queryText, - state = resultTyp, - initial, body, tables, exps} = q + val {query = queryText, initial, body, ...} = q val numArgs = maxFreeVar queryText + 1 - val queryExp = (EQuery q, dummyLoc) (* DEBUG *) (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) - val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) (* We use dummyTyp here. I think this is okay because databases don't store (effectful) functions, but perhaps there's some pathalogical corner case missing.... *) @@ -790,6 +809,8 @@ fun cacheQuery (effs, env, state, q) : (exp' * state) = (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) bound env) + val {state = resultTyp, ...} = q + val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) val attempt = (* Ziv misses Haskell's do notation.... 
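One consequence of cachePure is worth spelling out: the cache key for a pure expression is its list of free variables, each urlified.

    (* cachePure (env, e, state) succeeds only when
         - worthCaching e holds (e is an EQuery, or expSize e is above the threshold),
         - typOfExp' env e = SOME t with t not a TFun, and
         - MonoFooify.urlify succeeds on every free variable of e;
       the urlified variables then become the key passed to check and store.
       A failure at any step simply leaves e uncached. *)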
*) (safe 0 queryText andalso safe 0 initial andalso safe 2 body) @@ -797,7 +818,7 @@ fun cacheQuery (effs, env, state, q) : (exp' * state) = Sql.parse Sql.query queryText (fn queryParsed => - (cacheWrap (env, queryExp, resultTyp, args, state)) + (cachePure (env, EQuery q, state)) (fn (cachedExp, state) => SOME (cachedExp, @@ -813,24 +834,6 @@ fun cacheQuery (effs, env, state, q) : (exp' * state) = | NONE => (EQuery q, state) end -fun cachePure (env, exp', state as (_, _, _, index)) = - case (expSize (exp', dummyLoc) > sizeWorthCaching) - - typOfExp' env exp' of - NONE => NONE - | SOME (TFun _, _) => NONE - | SOME typ => - (List.foldr (fn (_, NONE) => NONE - | ((n, typ), SOME args) => - (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) - - (fn arg => SOME (arg :: args))) - (SOME []) - (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) - (freeVars (exp', dummyLoc)))) - - (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, state)) - fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = let fun wrapBindN (f : exp list -> exp') (args : (MonoEnv.env * exp) list) = @@ -896,13 +899,13 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = in (Impure (exp', loc), state) end - | _ => if effectful effs env exp - then (Impure exp, state) - else (Cachable (fn state => + | _ => (if effectful effs env exp + then Impure exp + else Cachable (fn state => case cachePure (env, exp', state) of - NONE => ((exp', loc), state) - | SOME (exp', state) => ((exp', loc), state)), - state) + NONE => ((exp', loc), state) + | SOME (exp', state) => ((exp', loc), state)), + state) end fun addCaching file = @@ -934,11 +937,7 @@ structure Invalidations = struct loc) fun eqsToInvalidation numArgs eqs = - let - fun inv n = if n < 0 then [] else IM.find (eqs, n) :: inv (n - 1) - in - inv (numArgs - 1) - end + List.tabulate (numArgs, (fn n => IM.find (eqs, n))) (* Tests if [ys] makes [xs] a redundant cache invalidation. [NONE] here represents unknown, which means a wider invalidation. *) -- cgit v1.2.3 From 78acba6decb79af464805a1ad3d81de56ef16151 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 21 Oct 2015 09:18:36 -0400 Subject: First draft of cache consolidation. --- src/sqlcache.sml | 447 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 310 insertions(+), 137 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index f98ff4bb..aec97bce 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -56,20 +56,34 @@ val doBind = | (env, MonoUtil.Exp.NamedE (x, n, t, eo, s)) => MonoEnv.pushENamed env x n t eo s | (env, MonoUtil.Exp.Datatype (x, n, cs)) => MonoEnv.pushDatatype env x n cs -(***********************) -(* General Combinators *) -(***********************) +val dummyLoc = ErrorMsg.dummySpan + + +(*********************) +(* General Utilities *) +(*********************) (* From the MLton wiki. 
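The backslash operators defined next are what make the option plumbing above readable. A usage sketch, with the reading they give to the infix forms used throughout this file:

    (* x <\f\> y   parses as   (x <\ f) \> y   =   f (x, y)
       so a chain such as
         Sql.parse Sql.query text
         <\obind\>
         (fn q => ok <\oguard\> SOME (use q))
       reads as: bind the optional parse result, then guard on the boolean ok,
       yielding NONE as soon as either step fails; obind is option bind with
       tupled arguments and oguard keeps its right operand only when the
       boolean on the left holds. *)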
*) infixr 3 /> fun f /> y = fn x => f (x, y) (* Right section *) infixr 3 SOME (f x) | _ => NONE +fun omap2 f = fn (SOME x, SOME y) => SOME (f (x,y)) | _ => NONE +fun osequence ys = List.foldr (omap2 op::) (SOME []) ys -fun mapFst f (x, y) = (f x, y) - +fun indexOf test = + let + fun f n = + fn [] => NONE + | (x::xs) => if test x then SOME n else f (n+1) xs + in + f 0 + end (*******************) (* Effect Analysis *) @@ -289,6 +303,170 @@ end structure AtomOptionKey = OptionKeyFn(AtomExpKey) +val rec tablesOfQuery = + fn Sql.Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) + | Sql.Union (q1, q2) => SS.union (tablesOfQuery q1, tablesOfQuery q2) + +val tableOfDml = + fn Sql.Insert (tab, _) => tab + | Sql.Delete (tab, _) => tab + | Sql.Update (tab, _, _) => tab + +val freeVars = + MonoUtil.Exp.foldB + {typ = #2, + exp = fn (bound, ERel n, vars) => if n < bound + then vars + else IS.add (vars, n - bound) + | (_, _, vars) => vars, + bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} + 0 + IS.empty + +datatype unbind = Known of exp | Unknowns of int + +structure InvalInfo :> sig + type t + type state = {tableToIndices : SIMM.multimap, + indexToInvalInfo : (t * int) IntBinaryMap.map, + ffiInfo : {index : int, params : int} list, + index : int} + val empty : t + val singleton : Sql.query -> t + val query : t -> Sql.query + val orderArgs : t * IS.set -> int list + val unbind : t * unbind -> t option + val union : t * t -> t + val updateState : t * int * state -> state +end = struct + + type t = Sql.query list + + type state = {tableToIndices : SIMM.multimap, + indexToInvalInfo : (t * int) IntBinaryMap.map, + ffiInfo : {index : int, params : int} list, + index : int} + + val empty = [] + + fun singleton q = [q] + + val union = op@ + + (* Need lift', etc. because we don't have rank-2 polymorphism. This should + probably use a functor, but this works for now. *) + fun traverseSqexp (pure, _, lift, _, lift'', lift2, _) f = + let + val rec tr = + fn Sql.SqNot se => lift Sql.SqNot (tr se) + | Sql.Binop (r, se1, se2) => + lift2 (fn (trse1, trse2) => Sql.Binop (r, trse1, trse2)) (tr se1, tr se2) + | Sql.SqKnown se => lift Sql.SqKnown (tr se) + | Sql.Inj (e', loc) => lift'' (fn fe' => Sql.Inj (fe', loc)) (f e') + | Sql.SqFunc (s, se) => lift (fn trse => Sql.SqFunc (s, trse)) (tr se) + | se => pure se + in + tr + end + + fun traverseQuery (ops as (_, pure', _, lift', _, _, lift2')) f = + let + val rec mp = + fn Sql.Query1 q => + (case #Where q of + NONE => pure' (Sql.Query1 q) + | SOME se => + lift' (fn mpse => Sql.Query1 {Select = #Select q, + From = #From q, + Where = SOME mpse}) + (traverseSqexp ops f se)) + | Sql.Union (q1, q2) => lift2' Sql.Union (mp q1, mp q2) + in + mp + end + + fun foldMapQuery plus zero = traverseQuery (fn _ => zero, + fn _ => zero, + fn _ => fn x => x, + fn _ => fn x => x, + fn _ => fn x => x, + fn _ => plus, + fn _ => plus) + + val omapQuery = traverseQuery (SOME, SOME, omap, omap, omap, omap2, omap2) + + val varsOfQuery = foldMapQuery IS.union + IS.empty + (fn e' => freeVars (e', dummyLoc)) + + val varsOfList = + fn [] => IS.empty + | (q::qs) => varsOfQuery (List.foldl Sql.Union q qs) + + fun orderArgs (qs, vars) = + let + val invalVars = varsOfList qs + in + (* Put arguments we might invalidate by first. *) + IS.listItems invalVars @ IS.listItems (IS.difference (vars, invalVars)) + end + + (* As a kludge, we rename the variables in the query to correspond to the + argument of the cache they're part of. 
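A small example of the renaming, with the variable numbers invented: if the union of this cache's queries mentions the enclosing environment's variables 2 and 5, then

    (* varsOfQuery q = {2, 5}   ==>   ns = [2, 5]
       ERel 2 |-> ERel 0,  ERel 5 |-> ERel 1 in the stored query *)

so the stored query speaks in terms of the cache's own argument positions, matching the order in which orderArgs passes those variables along as arguments.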
*) + val query = + fn (q::qs) => + let + val q = List.foldl Sql.Union q qs + val ns = IS.listItems (varsOfQuery q) + val rename = + fn ERel n => omap ERel (indexOf (fn n' => n' = n) ns) + | _ => raise Match + in + case omapQuery rename q of + SOME q => q + (* We should never get NONE because indexOf should never fail. *) + | NONE => raise Match + end + (* We should never reach this case because [updateState] won't put + anything in the state if there are no queries. *) + | [] => raise Match + + fun unbind1 ub = + case ub of + Known (e', loc) => + let + val replaceRel0 = case e' of + ERel m => SOME (ERel m) + | _ => NONE + in + omapQuery (fn ERel 0 => replaceRel0 + | ERel n => SOME (ERel (n-1)) + | _ => raise Match) + end + | Unknowns k => + omapQuery (fn ERel n => if n >= k then NONE else SOME (ERel (n-k)) + | _ => raise Match) + + fun unbind (qs, ub) = + case ub of + (* Shortcut if nothing's changing. *) + Unknowns 0 => SOME qs + | _ => osequence (map (unbind1 ub) qs) + + fun updateState ((qs, numArgs, state as {index, ...}) : t * int * state) = + {tableToIndices = List.foldr (fn (q, acc) => + SS.foldl (fn (tab, acc) => + SIMM.insert (acc, tab, index)) + acc + (tablesOfQuery q)) + (#tableToIndices state) + qs, + indexToInvalInfo = IM.insert (#indexToInvalInfo state, index, (qs, numArgs)), + ffiInfo = {index = index, params = numArgs} :: #ffiInfo state, + index = index + 1} + +end + structure UF = UnionFindFn(AtomExpKey) structure ConflictMaps = struct @@ -388,8 +566,7 @@ structure ConflictMaps = struct equivalence classes, so the [#1] could be [#2]. *) val mergeEqs : (atomExp IntBinaryMap.map option list -> atomExp IntBinaryMap.map option) = - List.foldr (fn (SOME eqs, SOME acc) => SOME (IM.unionWith #1 (eqs, acc)) | _ => NONE) - (SOME IM.empty) + List.foldr (omap2 (IM.unionWith #1)) (SOME IM.empty) val simplify = map TS.listItems @@ -459,15 +636,6 @@ val rec dmlToFormula = Combo (Conj, [mark fVals, fWhere])])) end -val rec tablesQuery = - fn Sql.Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) - | Sql.Union (q1, q2) => SS.union (tablesQuery q1, tablesQuery q2) - -val tableDml = - fn Sql.Insert (tab, _) => tab - | Sql.Delete (tab, _) => tab - | Sql.Update (tab, _, _) => tab - (*************************************) (* Program Instrumentation Utilities *) @@ -482,8 +650,6 @@ val varName = val {check, store, flush, ...} = getCache () -val dummyLoc = ErrorMsg.dummySpan - val dummyTyp = (TRecord [], dummyLoc) fun stringExp s = (EPrim (Prim.String (Prim.Normal, s)), dummyLoc) @@ -701,12 +867,28 @@ Both queries and caches should have IDs. 
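A concrete pass through that chain, with every number invented for illustration: suppose cache 3 was created for a query on table tab whose WHERE clause compares tab.Id against the cache's first argument. Then tab maps to 3 in the table-to-caches multimap, and 3 maps to that parsed query together with its argument count. When a DELETE FROM tab WHERE Id = 42 is later compiled, the lookup by table finds cache 3, the conflict analysis between the query and the DML pins argument 0 to 42, and a flush of cache 3 keyed on 42 is emitted ahead of the DML. A comparison the analysis cannot resolve shows up as an unknown (NONE) in that argument position and forces a wider flush.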
*) -fun cacheWrap (env, exp, resultTyp, args, state as (_, _, ffiInfo, index)) = +type state = InvalInfo.state + +datatype subexp = Cachable of InvalInfo.t * (state -> exp * state) | Impure of exp + +val isImpure = + fn Cachable _ => false + | Impure _ => true + +val runSubexp : subexp * state -> exp * state = + fn (Cachable (_, f), state) => f state + | (Impure e, state) => (e, state) + +val invalInfoOfSubexp = + fn Cachable (invalInfo, _) => invalInfo + | Impure _ => raise Match + +fun cacheWrap (env, exp, typ, args, index) = let val loc = dummyLoc val rel0 = (ERel 0, loc) in - case MonoFooify.urlify env (rel0, resultTyp) of + case MonoFooify.urlify env (rel0, typ) of NONE => NONE | SOME urlified => let @@ -716,58 +898,18 @@ fun cacheWrap (env, exp, resultTyp, args, state as (_, _, ffiInfo, index)) = val check = (check (index, args), loc) val store = (store (index, argsInc, urlified), loc) in - SOME ((ECase - (check, - [((PNone stringTyp, loc), - (ELet (varName "q", resultTyp, exp, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), - (* Boolean is false because we're not unurlifying from a cookie. *) - (EUnurlify (rel0, resultTyp, false), loc))], - {disc = (TOption stringTyp, loc), result = resultTyp})), - (#1 state, - #2 state, - {index = index, params = length args} :: ffiInfo, - index + 1)) + SOME (ECase (check, + [((PNone stringTyp, loc), + (ELet (varName "q", typ, exp, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), + (* Boolean is false because we're not unurlifying from a cookie. *) + (EUnurlify (rel0, typ, false), loc))], + {disc = (TOption stringTyp, loc), result = typ})) end end -val maxFreeVar = - MonoUtil.Exp.foldB - {typ = #2, - exp = fn (bound, ERel n, v) => Int.max (v, n - bound) | (_, _, v) => v, - bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} - 0 - ~1 - -val freeVars = - IS.listItems - o MonoUtil.Exp.foldB - {typ = #2, - exp = fn (bound, ERel n, vars) => if n < bound - then vars - else IS.add (vars, n - bound) - | (_, _, vars) => vars, - bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} - 0 - IS.empty - val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 -type state = (SIMM.multimap - * (Sql.query * int) IntBinaryMap.map - * {index : int, params : int} list - * int) - -datatype subexp = Cachable of state -> (exp * state) | Impure of exp - -val isImpure = - fn Cachable _ => false - | Impure _ => true - -val runSubexp : subexp * state -> exp * state = - fn (Cachable f, state) => f state - | (Impure e, state) => (e, state) - (* TODO: pick a number. 
*) val sizeWorthCaching = 5 @@ -775,31 +917,33 @@ val worthCaching = fn EQuery _ => true | exp' => expSize (exp', dummyLoc) > sizeWorthCaching -fun cachePure (env, exp', state as (_, _, _, index)) = +fun cacheExp ((env, exp', invalInfo, state) : MonoEnv.env * exp' * InvalInfo.t * state) = case (worthCaching exp') typOfExp' env exp' of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - (List.foldr (fn (_, NONE) => NONE - | ((n, typ), SOME args) => - (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) - - (fn arg => SOME (arg :: args))) - (SOME []) - (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) - (ListMergeSort.sort op> (freeVars (exp', dummyLoc))))) - - (fn args => cacheWrap (env, (exp', dummyLoc), typ, args, state)) + let + val ns = InvalInfo.orderArgs (invalInfo, freeVars (exp', dummyLoc)) + val numArgs = length ns + in (List.foldr (fn (_, NONE) => NONE + | ((n, typ), SOME args) => + (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) + + (fn arg => SOME (arg :: args))) + (SOME []) + (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) ns)) + + (fn args => + (cacheWrap (env, (exp', dummyLoc), typ, args, #index state)) + + (fn cachedExp => + SOME (cachedExp, InvalInfo.updateState (invalInfo, numArgs, state)))) + end -fun cacheQuery (effs, env, state, q) : (exp' * state) = +fun cacheQuery (effs, env, q) : subexp = let - val (tableToIndices, indexToQueryNumArgs, ffiInfo, index) = state - val {query = queryText, initial, body, ...} = q - val numArgs = maxFreeVar queryText + 1 - (* DEBUG *) - (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) (* We use dummyTyp here. I think this is okay because databases don't store (effectful) functions, but perhaps there's some pathalogical corner case missing.... *) @@ -809,8 +953,9 @@ fun cacheQuery (effs, env, state, q) : (exp' * state) = (iterate (fn env => MonoEnv.pushERel env "_" dummyTyp NONE) bound env) - val {state = resultTyp, ...} = q - val args = List.tabulate (numArgs, fn n => (ERel n, dummyLoc)) + val {query = queryText, initial, body, ...} = q + (* DEBUG *) + (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) val attempt = (* Ziv misses Haskell's do notation.... 
*) (safe 0 queryText andalso safe 0 initial andalso safe 2 body) @@ -818,45 +963,64 @@ fun cacheQuery (effs, env, state, q) : (exp' * state) = Sql.parse Sql.query queryText (fn queryParsed => - (cachePure (env, EQuery q, state)) - - (fn (cachedExp, state) => - SOME (cachedExp, - (SS.foldr (fn (tab, qi) => SIMM.insert (qi, tab, index)) - tableToIndices - (tablesQuery queryParsed), - IM.insert (indexToQueryNumArgs, index, (queryParsed, numArgs)), - #3 state, - #4 state)))) + let + val invalInfo = InvalInfo.singleton queryParsed + fun mkExp state = + case cacheExp (env, EQuery q, invalInfo, state) of + NONE => ((EQuery q, dummyLoc), state) + | SOME (cachedExp, state) => ((cachedExp, dummyLoc), state) + in + SOME (Cachable (invalInfo, mkExp)) + end) in case attempt of - SOME pair => pair - | NONE => (EQuery q, state) + NONE => Impure (EQuery q, dummyLoc) + | SOME subexp => subexp end -fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = +fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = let - fun wrapBindN (f : exp list -> exp') (args : (MonoEnv.env * exp) list) = + fun wrapBindN (f : exp list -> exp') + (args : ((MonoEnv.env * exp) * unbind) list) = let - val (subexps, state) = ListUtil.foldlMap (cache effs) state args + val (subexps, state) = + ListUtil.foldlMap (cacheTree effs) + state + (map #1 args) fun mkExp state = mapFst (fn exps => (f exps, loc)) (ListUtil.foldlMap runSubexp state subexps) + val attempt = + if List.exists isImpure subexps + then NONE + else (List.foldl (omap2 InvalInfo.union) + (SOME InvalInfo.empty) + (ListPair.map + (fn (subexp, (_, unbinds)) => + InvalInfo.unbind (invalInfoOfSubexp subexp, unbinds)) + (subexps, args))) + + (fn invalInfo => + SOME (Cachable (invalInfo, + fn state => + case cacheExp (env, + f (map (#2 o #1) args), + invalInfo, + state) of + NONE => mkExp state + | SOME (e', state) => ((e', loc), state)), + state)) in - if List.exists isImpure subexps - then mapFst Impure (mkExp state) - else (Cachable (fn state => - case cachePure (env, f (map #2 args), state) of - NONE => mkExp state - | SOME (e', state) => ((e', loc), state)), - state) + case attempt of + SOME (subexp, state) => (subexp, state) + | NONE => mapFst Impure (mkExp state) end fun wrapBind1 f arg = wrapBindN (fn [arg] => f arg | _ => raise Match) [arg] fun wrapBind2 f (arg1, arg2) = wrapBindN (fn [arg1, arg2] => f (arg1, arg2) | _ => raise Match) [arg1, arg2] - fun wrapN f es = wrapBindN f (map (fn e => (env, e)) es) - fun wrap1 f e = wrapBind1 f (env, e) - fun wrap2 f (e1, e2) = wrapBind2 f ((env, e1), (env, e2)) + fun wrapN f es = wrapBindN f (map (fn e => ((env, e), Unknowns 0)) es) + fun wrap1 f e = wrapBind1 f ((env, e), Unknowns 0) + fun wrap2 f (e1, e2) = wrapBind2 f (((env, e1), Unknowns 0), ((env, e2), Unknowns 0)) in case exp' of ECon (dk, pc, SOME e) => wrap1 (fn e => ECon (dk, pc, SOME e)) e @@ -870,7 +1034,7 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = | EApp (e1, e2) => wrap2 EApp (e1, e2) | EAbs (s, t1, t2, e) => wrapBind1 (fn e => EAbs (s, t1, t2, e)) - (MonoEnv.pushERel env s t1 NONE, e) + ((MonoEnv.pushERel env s t1 NONE, e), Unknowns 1) | EUnop (s, e) => wrap1 (fn e => EUnop (s, e)) e | EBinop (bi, s, e1, e2) => wrap2 (fn (e1, e2) => EBinop (bi, s, e1, e2)) (e1, e2) | ERecord fields => @@ -883,26 +1047,26 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = (ListPair.map (fn (e, (p, _)) => (p, e)) (es, cases)), {disc = disc, result = result}) | _ => raise Match) - ((env, e) :: map (fn (p, e) => 
(MonoEnv.patBinds env p, e)) cases) + (((env, e), Unknowns 0) + :: map (fn (p, e) => + ((MonoEnv.patBinds env p, e), Unknowns (MonoEnv.patBindsN p))) + cases) | EStrcat (e1, e2) => wrap2 EStrcat (e1, e2) (* We record page writes, so they're cachable. *) | EWrite e => wrap1 EWrite e | ESeq (e1, e2) => wrap2 ESeq (e1, e2) | ELet (s, t, e1, e2) => wrapBind2 (fn (e1, e2) => ELet (s, t, e1, e2)) - ((env, e1), (MonoEnv.pushERel env s t (SOME e1), e2)) + (((env, e1), Unknowns 0), + ((MonoEnv.pushERel env s t (SOME e1), e2), Known e1)) (* ASK: | EClosure (n, es) => ? *) | EUnurlify (e, t, b) => wrap1 (fn e => EUnurlify (e, t, b)) e - | EQuery q => - let - val (exp', state) = cacheQuery (effs, env, state, q) - in - (Impure (exp', loc), state) - end + | EQuery q => (cacheQuery (effs, env, q), state) | _ => (if effectful effs env exp then Impure exp - else Cachable (fn state => - case cachePure (env, exp', state) of + else Cachable (InvalInfo.empty, + fn state => + case cacheExp (env, exp', InvalInfo.empty, state) of NONE => ((exp', loc), state) | SOME (exp', state) => ((exp', loc), state)), state) @@ -911,9 +1075,15 @@ fun cache (effs : IS.set) ((env, exp as (exp', loc)), state) = fun addCaching file = let val effs = effectfulDecls file - fun doTopLevelExp env exp state = runSubexp (cache effs ((env, exp), state)) + fun doTopLevelExp env exp state = runSubexp (cacheTree effs ((env, exp), state)) in - ((fileTopLevelMapfoldB doTopLevelExp file (SIMM.empty, IM.empty, [], 0)), effs) + (fileTopLevelMapfoldB doTopLevelExp + file + {tableToIndices = SIMM.empty, + indexToInvalInfo = IM.empty, + ffiInfo = [], + index = 0}, + effs) end @@ -951,12 +1121,16 @@ structure Invalidations = struct fun eqss (query, dml) = conflictMaps (queryToFormula query, dmlToFormula dml) - fun invalidations ((query, numArgs), dml) = - (map (map optionAtomExpToExp) - o removeRedundant madeRedundantBy - o map (eqsToInvalidation numArgs) - o eqss) - (query, dml) + fun invalidations ((invalInfo, numArgs), dml) = + let + val query = InvalInfo.query invalInfo + in + (map (map optionAtomExpToExp) + o removeRedundant madeRedundantBy + o map (eqsToInvalidation numArgs) + o eqss) + (query, dml) + end end @@ -966,7 +1140,7 @@ val invalidations = Invalidations.invalidations (* val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] *) (* val gunk' : exp list ref = ref [] *) -fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, ffiInfo, index)), effs) = +fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, index}), effs) = let val flushes = List.concat o map (fn (i, argss) => map (fn args => flush (i, args)) argss) @@ -979,14 +1153,13 @@ fun addFlushing ((file, (tableToIndices, indexToQueryNumArgs, ffiInfo, index)), val inval = case Sql.parse Sql.dml dmlText of SOME dmlParsed => - SOME (map (fn i => (case IM.find (indexToQueryNumArgs, i) of - SOME queryNumArgs => - (* DEBUG *) - ((* gunk := (queryNumArgs, dmlParsed) :: !gunk; *) - (i, invalidations (queryNumArgs, dmlParsed))) + SOME (map (fn i => (case IM.find (indexToInvalInfo, i) of + SOME invalInfo => + (i, invalidations (invalInfo, dmlParsed)) (* TODO: fail more gracefully. *) + (* This probably means invalidating everything.... 
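For the successful path just above, the shape of invs and of the emitted code is worth pinning down (cache indices and key lists invented):

    (* invs         = [(3, [keys1]), (7, [keys2, keys3])]
       flushes invs = [flush (3, keys1), flush (7, keys2), flush (7, keys3)]
       result       = sequence (flushes invs @ [dmlExp])
       i.e. every potentially stale cache entry is flushed immediately before
       the DML itself executes. *)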
*) | NONE => raise Match)) - (SIMM.findList (tableToIndices, tableDml dmlParsed))) + (SIMM.findList (tableToIndices, tableOfDml dmlParsed))) | NONE => NONE in case inval of -- cgit v1.2.3 From 2e9eb1c2b1b1279e627034b6bfbfb86e4f2bfba7 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 4 Nov 2015 20:12:07 -0500 Subject: Consildation of caches understands sqlification. --- caching-tests/test.ur | 30 ++-- src/sqlcache.sml | 389 +++++++++++++++++++++++++++++++------------------- 2 files changed, 267 insertions(+), 152 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 00f05768..338f9236 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -11,15 +11,29 @@ fun cache id = | Some row => {[row.Tab.Val]}} -fun cache2 id v = - res <- oneOrNoRows (SELECT tab.Val - FROM tab - WHERE tab.Id = {[id]} AND tab.Val = {[v]}); +(* fun cache2 id v = *) +(* res <- oneOrNoRows (SELECT tab.Val *) +(* FROM tab *) +(* WHERE tab.Id = {[id]} AND tab.Val = {[v]}); *) +(* return *) +(* Reading {[id]}. *) +(* {case res of *) +(* None => Nope, that's not it. *) +(* | Some _ => Hooray! You guessed it!} *) +(* *) + +fun cache2 id1 id2 = + res1 <- oneOrNoRows (SELECT tab.Val + FROM tab + WHERE tab.Id = {[id1]}); + res2 <- oneOrNoRows (SELECT tab.Val + FROM tab + WHERE tab.Id = {[id2]}); return - Reading {[id]}. - {case res of - None => Nope, that's not it. - | Some _ => Hooray! You guessed it!} + Reading {[id1]} and {[id2]}. + {case (res1, res2) of + (Some _, Some _) => Both are there. + | _ => One of them is missing.} fun flush id = diff --git a/src/sqlcache.sml b/src/sqlcache.sml index aec97bce..eccf90d1 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -64,8 +64,8 @@ val dummyLoc = ErrorMsg.dummySpan (*********************) (* From the MLton wiki. *) -infixr 3 /> fun f /> y = fn x => f (x, y) (* Right section *) -infixr 3 f (x, y) (* Left section *) +infix 3 \> fun f \> y = f y (* Left application *) fun mapFst f (x, y) = (f x, y) @@ -319,12 +319,15 @@ val freeVars = then vars else IS.add (vars, n - bound) | (_, _, vars) => vars, - bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 | (bound, _) => bound} + bind = fn (bound, MonoUtil.Exp.RelE _) => bound + 1 + | (bound, _) => bound} 0 IS.empty datatype unbind = Known of exp | Unknowns of int +datatype cacheArg = AsIs of exp | Urlify of exp + structure InvalInfo :> sig type t type state = {tableToIndices : SIMM.multimap, @@ -334,27 +337,48 @@ structure InvalInfo :> sig val empty : t val singleton : Sql.query -> t val query : t -> Sql.query - val orderArgs : t * IS.set -> int list + val orderArgs : t * IS.set -> cacheArg list val unbind : t * unbind -> t option val union : t * t -> t val updateState : t * int * state -> state end = struct - type t = Sql.query list + datatype sqlArg = FreeVar of int | Sqlify of string * string * sqlArg * typ + + type subst = sqlArg IM.map + + (* TODO: store free variables as well? *) + type t = (Sql.query * subst) list type state = {tableToIndices : SIMM.multimap, indexToInvalInfo : (t * int) IntBinaryMap.map, ffiInfo : {index : int, params : int} list, index : int} - val empty = [] - - fun singleton q = [q] - - val union = op@ + structure AM = BinaryMapFn(struct + type ord_key = sqlArg + (* Saw this on MLton wiki. 
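The sqlArg type introduced in this patch is the piece that lets consolidation look through sqlification: a cache argument is either a variable used directly or a Basis sqlify* application of one, so the same source variable spliced into SQL text and used elsewhere counts only once. Schematically:

    (* argOfExp (ERel n, _)                                   = SOME (FreeVar n)
       argOfExp (EFfiApp ("Basis", "sqlifyInt", [(x, t)]), _) = SOME (Sqlify ("Basis", "sqlifyInt", FreeVar n, t))
                                                                 when x is (ERel n, _)
       argOfExp _                                             = NONE *)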
*) + fun ifNotEq (cmp, thunk) = case cmp of + EQUAL => thunk () + | _ => cmp + fun try f x () = f x + val rec compare = + fn (FreeVar n1, FreeVar n2) => + Int.compare (n1, n2) + | (FreeVar _, _) => LESS + | (_, FreeVar _) => GREATER + | (Sqlify (m1, x1, arg1, t1), Sqlify (m2, x2, arg2, t2)) => + String.compare (m1, m2) + <\ifNotEq\> try String.compare (x1, x2) + <\ifNotEq\> try MonoUtil.Typ.compare (t1, t2) + <\ifNotEq\> try compare (arg1, arg2) + end) + + (* Traversal Utilities *) + (* TODO: get rid of unused ones. *) (* Need lift', etc. because we don't have rank-2 polymorphism. This should - probably use a functor, but this works for now. *) + probably use a functor (an ML one, not Haskell) but works for now. *) fun traverseSqexp (pure, _, lift, _, lift'', lift2, _) f = let val rec tr = @@ -385,76 +409,146 @@ end = struct mp end - fun foldMapQuery plus zero = traverseQuery (fn _ => zero, - fn _ => zero, - fn _ => fn x => x, - fn _ => fn x => x, - fn _ => fn x => x, - fn _ => plus, - fn _ => plus) + (* Include unused tuple elements in argument for convenience of using same + argument as [traverseQuery]. *) + fun traverseIM (pure, _, _, _, _, lift2, _) f = + IM.foldli (fn (k, v, acc) => lift2 (fn (acc, w) => IM.insert (acc, k, w)) (acc, f (k,v))) + (pure IM.empty) + + fun traverseSubst (ops as (_, pure', lift, _, _, _, lift2')) f = + let + val rec mp = + fn FreeVar n => f n + | Sqlify (m, x, arg, t) => lift (fn mparg => Sqlify (m, x, mparg, t)) (mp arg) + in + traverseIM ops (fn (_, v) => mp v) + end + + fun monoidOps plus zero = (fn _ => zero, fn _ => zero, + fn _ => fn x => x, fn _ => fn x => x, fn _ => fn x => x, + fn _ => plus, fn _ => plus) + + val optionOps = (SOME, SOME, omap, omap, omap, omap2, omap2) - val omapQuery = traverseQuery (SOME, SOME, omap, omap, omap, omap2, omap2) + fun foldMapQuery plus zero = traverseQuery (monoidOps plus zero) + val omapQuery = traverseQuery optionOps + fun foldMapIM plus zero = traverseIM (monoidOps plus zero) + fun omapIM f = traverseIM optionOps f + fun foldMapSubst plus zero = traverseSubst (monoidOps plus zero) + fun omapSubst f = traverseSubst optionOps f val varsOfQuery = foldMapQuery IS.union IS.empty (fn e' => freeVars (e', dummyLoc)) + val varsOfSubst = foldMapSubst IS.union IS.empty IS.singleton + val varsOfList = fn [] => IS.empty | (q::qs) => varsOfQuery (List.foldl Sql.Union q qs) - fun orderArgs (qs, vars) = + (* Signature Implementation *) + + val empty = [] + + fun singleton q = [(q, IS.foldl (fn (n, acc) => IM.insert (acc, n, FreeVar n)) + IM.empty + (varsOfQuery q))] + + val union = op@ + + fun sqlArgsMap (qs : t) = + let + val args = + List.foldl (fn ((q, subst), acc) => + IM.foldl (fn (arg, acc) => AM.insert (acc, arg, ())) acc subst) + AM.empty + qs + val countRef = ref (~1) + fun count () = (countRef := !countRef + 1; !countRef) + in + (* Maps each arg to a different consecutive integer, starting from 0. *) + AM.map count args + end + + val rec expOfArg = + fn FreeVar n => (ERel n, dummyLoc) + | Sqlify (m, x, arg, t) => (EFfiApp (m, x, [(expOfArg arg, t)]), dummyLoc) + + fun orderArgs (qs : t, vars) = let - val invalVars = varsOfList qs + fun erel n = (ERel n, dummyLoc) + val argsMap = sqlArgsMap qs + val args = map (expOfArg o #1) (AM.listItemsi argsMap) + val invalVars = List.foldl IS.union IS.empty (map freeVars args) in (* Put arguments we might invalidate by first. 
*) - IS.listItems invalVars @ IS.listItems (IS.difference (vars, invalVars)) + map AsIs args + (* TODO: make sure these variables are okay to remove from the argument list. *) + @ map (Urlify o erel) (IS.listItems (IS.difference (vars, invalVars))) end (* As a kludge, we rename the variables in the query to correspond to the argument of the cache they're part of. *) - val query = - fn (q::qs) => + fun query (qs : t) = let - val q = List.foldl Sql.Union q qs - val ns = IS.listItems (varsOfQuery q) - val rename = - fn ERel n => omap ERel (indexOf (fn n' => n' = n) ns) + val argsMap = sqlArgsMap qs + fun substitute subst = + fn ERel n => IM.find (subst, n) + <\obind\> + (fn arg => + AM.find (argsMap, arg) + <\obind\> + (fn n' => SOME (ERel n'))) | _ => raise Match in - case omapQuery rename q of - SOME q => q - (* We should never get NONE because indexOf should never fail. *) - | NONE => raise Match + case (map #1 qs) of + (q :: qs) => + let + val q = List.foldl Sql.Union q qs + val ns = IS.listItems (varsOfQuery q) + val rename = + fn ERel n => omap ERel (indexOf (fn n' => n' = n) ns) + | _ => raise Match + in + case omapQuery rename q of + SOME q => q + (* We should never get NONE because indexOf should never fail. *) + | NONE => raise Match + end + (* We should never reach this case because [updateState] won't + put anything in the state if there are no queries. *) + | [] => raise Match end - (* We should never reach this case because [updateState] won't put - anything in the state if there are no queries. *) - | [] => raise Match - fun unbind1 ub = - case ub of - Known (e', loc) => - let - val replaceRel0 = case e' of - ERel m => SOME (ERel m) - | _ => NONE - in - omapQuery (fn ERel 0 => replaceRel0 - | ERel n => SOME (ERel (n-1)) - | _ => raise Match) - end - | Unknowns k => - omapQuery (fn ERel n => if n >= k then NONE else SOME (ERel (n-k)) - | _ => raise Match) + val rec argOfExp = + fn (ERel n, _) => SOME (FreeVar n) + | (EFfiApp ("Basis", x, [(exp, t)]), _) => + if String.isPrefix "sqlify" x + then omap (fn arg => Sqlify ("Basis", x, arg, t)) (argOfExp exp) + else NONE + | _ => NONE + + val unbind1 = + fn Known e => + let + val replacement = argOfExp e + in + omapSubst (fn 0 => replacement + | n => SOME (FreeVar (n-1))) + end + | Unknowns k => omapSubst (fn n => if n >= k then NONE else SOME (FreeVar (n-k))) fun unbind (qs, ub) = case ub of (* Shortcut if nothing's changing. *) Unknowns 0 => SOME qs - | _ => osequence (map (unbind1 ub) qs) + | _ => osequence (map (fn (q, subst) => unbind1 ub subst + <\obind\> + (fn subst' => SOME (q, subst'))) qs) - fun updateState ((qs, numArgs, state as {index, ...}) : t * int * state) = - {tableToIndices = List.foldr (fn (q, acc) => + fun updateState (qs, numArgs, state as {index, ...} : state) = + {tableToIndices = List.foldr (fn ((q, _), acc) => SS.foldl (fn (tab, acc) => SIMM.insert (acc, tab, index)) acc @@ -469,6 +563,70 @@ end structure UF = UnionFindFn(AtomExpKey) +val rec sqexpToFormula = + fn Sql.SqTrue => Combo (Conj, []) + | Sql.SqFalse => Combo (Disj, []) + | Sql.SqNot e => Negate (sqexpToFormula e) + | Sql.Binop (Sql.RCmp c, e1, e2) => Atom (c, e1, e2) + | Sql.Binop (Sql.RLop l, p1, p2) => Combo (case l of Sql.And => Conj | Sql.Or => Disj, + [sqexpToFormula p1, sqexpToFormula p2]) + (* ASK: any other sqexps that can be props? 
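To make the conversion concrete (table and column names invented), a WHERE clause parsed by the Sql module turns into a formula like this:

    (* WHERE t.A = 1 AND (t.B = 2 OR NOT (t.C = 3))   becomes, roughly,
       Combo (Conj,
              [Atom (Sql.Eq, Sql.Field ("t", "A"), <1>),
               Combo (Disj,
                      [Atom (Sql.Eq, Sql.Field ("t", "B"), <2>),
                       Negate (Atom (Sql.Eq, Sql.Field ("t", "C"), <3>))])])
       where <n> stands for whatever sqexp the literal parses to (elided here). *)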
*) + | _ => raise Match + +fun renameTables tablePairs = + let + fun renameString table = + case List.find (fn (_, t) => table = t) tablePairs of + NONE => table + | SOME (realTable, _) => realTable + val renameSqexp = + fn Sql.Field (table, field) => Sql.Field (renameString table, field) + | e => e + fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) + in + mapFormula renameAtom + end + +val rec queryToFormula = + fn Sql.Query1 {Where = NONE, ...} => Combo (Conj, []) + | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => + renameTables tablePairs (sqexpToFormula e) + | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula q1, queryToFormula q2]) + +fun valsToFormula (table, vals) = + Combo (Conj, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) + +val rec dmlToFormula = + fn Sql.Insert (table, vals) => valsToFormula (table, vals) + | Sql.Delete (table, wher) => renameTables [(table, "T")] (sqexpToFormula wher) + | Sql.Update (table, vals, wher) => + let + val fWhere = sqexpToFormula wher + val fVals = valsToFormula (table, vals) + val modifiedFields = SS.addList (SS.empty, map #1 vals) + (* TODO: don't use field name hack. *) + val markField = + fn e as Sql.Field (t, v) => if SS.member (modifiedFields, v) + then Sql.Field (t, v ^ "'") + else e + | e => e + val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) + in + renameTables [(table, "T")] + (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), + Combo (Conj, [mark fVals, fWhere])])) + end + +(* val rec toFormula = *) +(* fn (Sql.Union (q1, q2), d) => Combo (Disj, [toFormula (q1, d), toFormula (q2, d)]) *) +(* | (q as Sql.Query1 {Select = items, ...}, d) => *) +(* let *) +(* val selected = osequence (map (fn )) *) +(* in *) +(* case selected of *) +(* NONE => (Combo (Conj, [markQuery (), markDml fDml])) *) +(* end *) + structure ConflictMaps = struct structure TK = TripleKeyFn(structure I = CmpKey @@ -582,72 +740,11 @@ end val conflictMaps = ConflictMaps.conflictMaps -val rec sqexpToFormula = - fn Sql.SqTrue => Combo (Conj, []) - | Sql.SqFalse => Combo (Disj, []) - | Sql.SqNot e => Negate (sqexpToFormula e) - | Sql.Binop (Sql.RCmp c, e1, e2) => Atom (c, e1, e2) - | Sql.Binop (Sql.RLop l, p1, p2) => Combo (case l of Sql.And => Conj | Sql.Or => Disj, - [sqexpToFormula p1, sqexpToFormula p2]) - (* ASK: any other sqexps that can be props? 
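The primed-field encoding in dmlToFormula rewards a worked example (table and values invented). For UPDATE t SET A = 2 WHERE A = 1, the modified field set is {A} and the resulting formula is, schematically,

    (*   (A = 2 /\ A' = 1)  \/  (A' = 2 /\ A = 1)
       The unprimed A is the value a cached query observes: the left disjunct
       lets the query match the row as it stands after the update (its primed,
       pre-update image satisfied the WHERE clause), the right one as it stood
       before the update. Conjoining this with the query's own formula over
       the unprimed fields catches conflicts in both directions. *)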
*) - | _ => raise Match - -fun renameTables tablePairs = - let - fun renameString table = - case List.find (fn (_, t) => table = t) tablePairs of - NONE => table - | SOME (realTable, _) => realTable - val renameSqexp = - fn Sql.Field (table, field) => Sql.Field (renameString table, field) - | e => e - fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) - in - mapFormula renameAtom - end - -val rec queryToFormula = - fn Sql.Query1 {Where = NONE, ...} => Combo (Conj, []) - | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => - renameTables tablePairs (sqexpToFormula e) - | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula q1, queryToFormula q2]) - -fun valsToFormula (table, vals) = - Combo (Conj, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) - -val rec dmlToFormula = - fn Sql.Insert (table, vals) => valsToFormula (table, vals) - | Sql.Delete (table, wher) => renameTables [(table, "T")] (sqexpToFormula wher) - | Sql.Update (table, vals, wher) => - let - val fWhere = sqexpToFormula wher - val fVals = valsToFormula (table, vals) - val modifiedFields = SS.addList (SS.empty, map #1 vals) - (* TODO: don't use field name hack. *) - val markField = - fn e as Sql.Field (t, v) => if SS.member (modifiedFields, v) - then Sql.Field (t, v ^ "'") - else e - | e => e - val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) - in - renameTables [(table, "T")] - (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), - Combo (Conj, [mark fVals, fWhere])])) - end - (*************************************) (* Program Instrumentation Utilities *) (*************************************) -val varName = - let - val varNumber = ref 0 - in - fn s => (varNumber := !varNumber + 1; s ^ Int.toString (!varNumber)) - end - val {check, store, flush, ...} = getCache () val dummyTyp = (TRecord [], dummyLoc) @@ -752,7 +849,7 @@ val simplifySql = chunks fun wrapLets e' = (* Important that this is foldl (to oppose foldr above). *) - List.foldl (fn (v, e') => ELet (varName "sqlArg", stringTyp, v, (e', loc))) + List.foldl (fn (v, e') => ELet ("sqlArg", stringTyp, v, (e', loc))) e' newVariables val numArgs = length newVariables @@ -900,8 +997,8 @@ fun cacheWrap (env, exp, typ, args, index) = in SOME (ECase (check, [((PNone stringTyp, loc), - (ELet (varName "q", typ, exp, (ESeq (store, rel0), loc)), loc)), - ((PSome (stringTyp, (PVar (varName "hit", stringTyp), loc)), loc), + (ELet ("q", typ, exp, (ESeq (store, rel0), loc)), loc)), + ((PSome (stringTyp, (PVar ("hit", stringTyp), loc)), loc), (* Boolean is false because we're not unurlifying from a cookie. 
*) (EUnurlify (rel0, typ, false), loc))], {disc = (TOption stringTyp, loc), result = typ})) @@ -917,29 +1014,35 @@ val worthCaching = fn EQuery _ => true | exp' => expSize (exp', dummyLoc) > sizeWorthCaching -fun cacheExp ((env, exp', invalInfo, state) : MonoEnv.env * exp' * InvalInfo.t * state) = - case (worthCaching exp') - - typOfExp' env exp' of +fun cacheExp (env, exp', invalInfo, state : state) = + case worthCaching exp' <\oguard\> typOfExp' env exp' of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => let - val ns = InvalInfo.orderArgs (invalInfo, freeVars (exp', dummyLoc)) - val numArgs = length ns - in (List.foldr (fn (_, NONE) => NONE - | ((n, typ), SOME args) => - (MonoFooify.urlify env ((ERel n, dummyLoc), typ)) - - (fn arg => SOME (arg :: args))) - (SOME []) - (map (fn n => (n, #2 (MonoEnv.lookupERel env n))) ns)) - - (fn args => - (cacheWrap (env, (exp', dummyLoc), typ, args, #index state)) - - (fn cachedExp => - SOME (cachedExp, InvalInfo.updateState (invalInfo, numArgs, state)))) + val args = InvalInfo.orderArgs (invalInfo, freeVars (exp', dummyLoc)) + val numArgs = length args + in (List.foldr (fn (arg, acc) => + acc + <\obind\> + (fn args' => + (case arg of + AsIs exp => SOME exp + | Urlify exp => + typOfExp env exp + <\obind\> + (fn typ => + (MonoFooify.urlify env (exp, typ)))) + <\obind\> + (fn arg' => SOME (arg' :: args')))) + (SOME []) + args) + <\obind\> + (fn args' => + cacheWrap (env, (exp', dummyLoc), typ, args', #index state) + <\obind\> + (fn cachedExp => + SOME (cachedExp, InvalInfo.updateState (invalInfo, numArgs, state)))) end fun cacheQuery (effs, env, q) : subexp = @@ -959,9 +1062,9 @@ fun cacheQuery (effs, env, q) : subexp = val attempt = (* Ziv misses Haskell's do notation.... *) (safe 0 queryText andalso safe 0 initial andalso safe 2 body) - + <\oguard\> Sql.parse Sql.query queryText - + <\obind\> (fn queryParsed => let val invalInfo = InvalInfo.singleton queryParsed @@ -998,7 +1101,7 @@ fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = (fn (subexp, (_, unbinds)) => InvalInfo.unbind (invalInfoOfSubexp subexp, unbinds)) (subexps, args))) - + <\obind\> (fn invalInfo => SOME (Cachable (invalInfo, fn state => @@ -1119,8 +1222,6 @@ structure Invalidations = struct | _ => false) | _ => false - fun eqss (query, dml) = conflictMaps (queryToFormula query, dmlToFormula dml) - fun invalidations ((invalInfo, numArgs), dml) = let val query = InvalInfo.query invalInfo @@ -1128,8 +1229,8 @@ structure Invalidations = struct (map (map optionAtomExpToExp) o removeRedundant madeRedundantBy o map (eqsToInvalidation numArgs) - o eqss) - (query, dml) + o conflictMaps) + (queryToFormula query, dmlToFormula dml) end end @@ -1140,7 +1241,7 @@ val invalidations = Invalidations.invalidations (* val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] *) (* val gunk' : exp list ref = ref [] *) -fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, index}), effs) = +fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state), effs) = let val flushes = List.concat o map (fn (i, argss) => map (fn args => flush (i, args)) argss) -- cgit v1.2.3 From b2c1c524f9074637cfbedc07a065f2c75d635e73 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 5 Nov 2015 01:48:42 -0500 Subject: First draft of more specific formulas for queries. 
--- caching-tests/test.urp | 1 + src/sqlcache.sml | 152 +++++++++++++++++++++++++++++++++++++------------ src/union_find_fn.sml | 5 ++ 3 files changed, 123 insertions(+), 35 deletions(-) (limited to 'src') diff --git a/caching-tests/test.urp b/caching-tests/test.urp index 2e07dad3..55b0bed7 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -2,5 +2,6 @@ database test.db sql test.sql safeGet Test/flush safeGet Test/flush17 +minHeap 4096 test diff --git a/src/sqlcache.sml b/src/sqlcache.sml index eccf90d1..7a7358f0 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -9,6 +9,10 @@ structure SS = BinarySetFn(SK) structure SM = BinaryMapFn(SK) structure SIMM = MultimapFn(structure KeyMap = SM structure ValSet = IS) +(* ASK: how do we deal with heap reallocation? *) + +fun id x = x + fun iterate f n x = if n < 0 then raise Fail "Can't iterate function negative number of times." else if n = 0 @@ -227,6 +231,8 @@ fun mapFormula mf = | Negate f => Negate (mapFormula mf f) | Combo (j, fs) => Combo (j, map (mapFormula mf) fs) +fun mapFormulaExps mf = mapFormula (fn (cmp, e1, e2) => (cmp, mf e1, mf e2)) + (****************) (* SQL Analysis *) @@ -582,56 +588,117 @@ fun renameTables tablePairs = val renameSqexp = fn Sql.Field (table, field) => Sql.Field (renameString table, field) | e => e - fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) + (* fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) *) in - mapFormula renameAtom + mapFormulaExps renameSqexp end -val rec queryToFormula = - fn Sql.Query1 {Where = NONE, ...} => Combo (Conj, []) - | Sql.Query1 {From = tablePairs, Where = SOME e, ...} => - renameTables tablePairs (sqexpToFormula e) - | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula q1, queryToFormula q2]) +fun queryToFormula marker = + fn Sql.Query1 {Select = sitems, From = tablePairs, Where = wher} => + let + val fWhere = case wher of + NONE => Combo (Conj, []) + | SOME e => sqexpToFormula e + in + renameTables tablePairs + (case marker of + NONE => fWhere + | SOME markFields => + let + val fWhereMarked = mapFormulaExps markFields fWhere + val toSqexp = + fn Sql.SqField tf => Sql.Field tf + | Sql.SqExp (se, _) => se + fun ineq se = Atom (Sql.Ne, se, markFields se) + val fIneqs = Combo (Disj, map (ineq o toSqexp) sitems) + in + (Combo (Conj, + [fWhere, + Combo (Disj, + [Negate fWhereMarked, + Combo (Conj, [fWhereMarked, fIneqs])])])) + end) + end + | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula marker q1, queryToFormula marker q2]) -fun valsToFormula (table, vals) = - Combo (Conj, map (fn (field, v) => Atom (Sql.Eq, Sql.Field (table, field), v)) vals) +fun valsToFormula (markLeft, markRight) (table, vals) = + Combo (Conj, + map (fn (field, v) => Atom (Sql.Eq, markLeft (Sql.Field (table, field)), markRight v)) + vals) -val rec dmlToFormula = - fn Sql.Insert (table, vals) => valsToFormula (table, vals) - | Sql.Delete (table, wher) => renameTables [(table, "T")] (sqexpToFormula wher) +(* TODO: verify logic for insertion and deletion. *) +val rec dmlToFormulaMarker = + fn Sql.Insert (table, vals) => (valsToFormula (id, id) (table, vals), NONE) + | Sql.Delete (table, wher) => (renameTables [(table, "T")] (sqexpToFormula wher), NONE) | Sql.Update (table, vals, wher) => let val fWhere = sqexpToFormula wher - val fVals = valsToFormula (table, vals) + fun fVals marks = valsToFormula marks (table, vals) val modifiedFields = SS.addList (SS.empty, map #1 vals) (* TODO: don't use field name hack. 
*) - val markField = - fn e as Sql.Field (t, v) => if SS.member (modifiedFields, v) + fun markFields table = + fn e as Sql.Field (t, v) => if t = table andalso SS.member (modifiedFields, v) then Sql.Field (t, v ^ "'") else e + | Sql.SqNot e => Sql.SqNot (markFields table e) + | Sql.Binop (r, e1, e2) => Sql.Binop (r, markFields table e1, markFields table e2) + | Sql.SqKnown e => Sql.SqKnown (markFields table e) + | Sql.SqFunc (s, e) => Sql.SqFunc (s, markFields table e) | e => e - val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) + val mark = mapFormulaExps (markFields "T") in - renameTables [(table, "T")] - (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), - Combo (Conj, [mark fVals, fWhere])])) + (* Inside renameTables, we mark with table "T". Outside, we use the real table name. *) + (renameTables [(table, "T")] + (Combo (Disj, [Combo (Conj, [fVals (id, markFields "T"), mark fWhere]), + Combo (Conj, [fVals (markFields "T", id), fWhere])])), + SOME (markFields table)) end -(* val rec toFormula = *) -(* fn (Sql.Union (q1, q2), d) => Combo (Disj, [toFormula (q1, d), toFormula (q2, d)]) *) -(* | (q as Sql.Query1 {Select = items, ...}, d) => *) -(* let *) -(* val selected = osequence (map (fn )) *) -(* in *) -(* case selected of *) -(* NONE => (Combo (Conj, [markQuery (), markDml fDml])) *) -(* end *) +fun pairToFormulas (query, dml) = + let + val (fDml, marker) = dmlToFormulaMarker dml + in + (queryToFormula marker query, fDml) + end + +(* structure ToFormula = struct *) + +(* val testOfQuery : Sql.query1 -> (Sql.cmp * Sql.sqexp * Sql.sqexp) formula = *) +(* fn {From = tablePairs, Where = SOME e, ...} => renameTables tablePairs (sqexpToFormula e) *) +(* | {Where = NONE, ...} => Combo (Conj, []) *) + +(* (* If selecting some parsable subset of fields, says which ones. [NONE] *) +(* means anything could be selected. *) *) +(* fun fieldsOfQuery (q : Sql.query1) = *) +(* osequence (map (fn Sql.SqField tf => SOME tf *) +(* | Sql.SqExp (Sql.Field tf) => SOME tf *) +(* | _ => NONE) (#Select q)) *) + +(* fun fieldsOfVals (table, vals, wher) = *) +(* let *) +(* val fWhere = renameTables [(table, "T")] (sqexpToFormula wher) *) +(* val fVals = valsToFormula (table, vals) *) +(* val modifiedFields = SS.addList (SS.empty, map #1 vals) *) +(* (* TODO: don't use field name hack. 
*) *) +(* val markField = *) +(* fn e as Sql.Field (t, v) => if SS.member (modifiedFields, v) *) +(* then Sql.Field (t, v ^ "'") *) +(* else e *) +(* | e => e *) +(* val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) *) +(* in *) +(* renameTables [(table, "T")] *) +(* (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), *) +(* Combo (Conj, [mark fVals, fWhere])])) *) +(* end *) +(* end *) structure ConflictMaps = struct structure TK = TripleKeyFn(structure I = CmpKey structure J = AtomOptionKey structure K = AtomOptionKey) + structure TS : ORD_SET = BinarySetFn(TK) val toKnownEquality = @@ -641,10 +708,20 @@ structure ConflictMaps = struct fn (Sql.Eq, SOME e1, SOME e2) => SOME (e1, e2) | _ => NONE - val equivClasses : (Sql.cmp * atomExp option * atomExp option) list -> atomExp list list = - UF.classes - o List.foldl UF.union' UF.empty - o List.mapPartial toKnownEquality + fun equivClasses atoms : atomExp list list option = + let + val uf = List.foldl UF.union' UF.empty (List.mapPartial toKnownEquality atoms) + val ineqs = List.filter (fn (cmp, _, _) => + cmp = Sql.Ne orelse cmp = Sql.Lt orelse cmp = Sql.Gt) + atoms + val contradiction = + fn (cmp, SOME ae1, SOME ae2) => (cmp = Sql.Ne orelse cmp = Sql.Lt orelse cmp = Sql.Gt) + andalso not (UF.together (uf, ae1, ae2)) + (* If we don't know one side of the comparision, not a contradiction. *) + | _ => false + in + not (List.exists contradiction atoms) <\oguard\> SOME (UF.classes uf) + end fun addToEqs (eqs, n, e) = case IM.find (eqs, n) of @@ -734,7 +811,10 @@ structure ConflictMaps = struct fun dnf (fQuery, fDml) = normalize simplify normalizeAtom Disj (Combo (Conj, [markQuery fQuery, markDml fDml])) - val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass o equivClasses) o dnf + val conflictMaps = + List.mapPartial (mergeEqs o map eqsOfClass) + o List.mapPartial equivClasses + o dnf end @@ -1230,7 +1310,7 @@ structure Invalidations = struct o removeRedundant madeRedundantBy o map (eqsToInvalidation numArgs) o conflictMaps) - (queryToFormula query, dmlToFormula dml) + (pairToFormulas (query, dml)) end end @@ -1269,11 +1349,13 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state | SOME invs => sequence (flushes invs @ [dmlExp]) end | e' => e' + val file = fileMap doExp file + in (* DEBUG *) (* gunk := []; *) ffiInfoRef := ffiInfo; - fileMap doExp file + file end diff --git a/src/union_find_fn.sml b/src/union_find_fn.sml index e6f8d9bf..7880591f 100644 --- a/src/union_find_fn.sml +++ b/src/union_find_fn.sml @@ -3,6 +3,7 @@ functor UnionFindFn(K : ORD_KEY) :> sig val empty : unionFind val union : unionFind * K.ord_key * K.ord_key -> unionFind val union' : (K.ord_key * K.ord_key) * unionFind -> unionFind + val together : unionFind * K.ord_key * K.ord_key -> bool val classes : unionFind -> K.ord_key list list end = struct @@ -34,6 +35,10 @@ fun find ((uf, _), x) = (S.listItems o #1 o findPair) (uf, x) fun classes (_, cs) = (map S.listItems o M.listItems) cs +fun together ((uf, _), x, y) = case K.compare (#2 (findPair (uf, x)), #2 (findPair (uf, y))) of + EQUAL => true + | _ => false + fun union ((uf, cs), x, y) = let val (xSet, xRep) = findPair (uf, x) -- cgit v1.2.3 From 1c2069212a7dec30db45e02391d7ca0154cd5709 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sat, 7 Nov 2015 15:16:44 -0500 Subject: Fix some table renaming issues. 
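Besides the table-renaming fixes, this corrects the polarity of the contradiction test in equivClasses: a disjunct is impossible when some inequality relates two terms that the equalities force together, which is what UF.together detects; the previous draft negated that test. A toy, self-contained version of the check, with lists of equivalence classes standing in for the UnionFindFn structure and only Ne playing the role of the inequality comparisons:

datatype atom = Eq of string * string | Ne of string * string

fun classOf classes x =
    case List.find (fn c => List.exists (fn y => y = x) c) classes of
        SOME c => c
      | NONE => [x]

fun union classes (x, y) =
    let
        val cx = classOf classes x
        val cy = classOf classes y
        val rest = List.filter (fn c => c <> cx andalso c <> cy) classes
    in
        if cx = cy then classes else (cx @ cy) :: rest
    end

fun contradictory atoms =
    let
        val classes = List.foldl (fn (Eq p, cs) => union cs p | (_, cs) => cs) [] atoms
        fun together (x, y) = classOf classes x = classOf classes y
    in
        List.exists (fn Ne p => together p | _ => false) atoms
    end

(* A query with WHERE tab.Id = {[n]} AND tab.Id <> 17 checked against an UPDATE
   guarded by Id = 17: the equalities put T.Id, ?n, and 17 in one class, so the
   disjunct carrying the inequality is impossible and yields no invalidation. *)
val example = contradictory [Eq ("T.Id", "?n"), Eq ("T.Id", "17"), Ne ("T.Id", "17")]
(* example = true *)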
--- caching-tests/test.ur | 52 ++++++++++++------- caching-tests/test.urp | 5 +- caching-tests/test.urs | 6 ++- src/sqlcache.sml | 136 +++++++++++++++++++------------------------------ 4 files changed, 93 insertions(+), 106 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 338f9236..e08c6e47 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -1,4 +1,4 @@ -table tab : {Id : int, Val : int} PRIMARY KEY Id +table tab : {Id : int, Val : int, Foo : int} PRIMARY KEY Id fun cache id = res <- oneOrNoRows (SELECT tab.Val @@ -22,19 +22,19 @@ fun cache id = (* | Some _ => Hooray! You guessed it!} *) (* *) -fun cache2 id1 id2 = - res1 <- oneOrNoRows (SELECT tab.Val - FROM tab - WHERE tab.Id = {[id1]}); - res2 <- oneOrNoRows (SELECT tab.Val - FROM tab - WHERE tab.Id = {[id2]}); - return - Reading {[id1]} and {[id2]}. - {case (res1, res2) of - (Some _, Some _) => Both are there. - | _ => One of them is missing.} - +(* fun cache2 id1 id2 = *) +(* res1 <- oneOrNoRows (SELECT tab.Val *) +(* FROM tab *) +(* WHERE tab.Id = {[id1]}); *) +(* res2 <- oneOrNoRows (SELECT tab.Val *) +(* FROM tab *) +(* WHERE tab.Id = {[id2]}); *) +(* return *) +(* Reading {[id1]} and {[id2]}. *) +(* {case (res1, res2) of *) +(* (Some _, Some _) => Both are there. *) +(* | _ => One of them is missing.} *) +(* *) fun flush id = dml (UPDATE tab @@ -44,14 +44,30 @@ fun flush id = Changed {[id]}! -val flush17 = +fun flash id = dml (UPDATE tab - SET Val = Val * (Id + 2) / Val - 3 - WHERE Id = 17); + SET Foo = Val + WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); return - Changed specifically 17! + Maybe changed {[id]}? +fun floosh id = + dml (UPDATE tab + SET Id = {[id + 1]} + WHERE Id = {[id]}); + return + Shifted {[id]}! + + +(* val flush17 = *) +(* dml (UPDATE tab *) +(* SET Val = Val * (Id + 2) / Val - 3 *) +(* WHERE Id = 17); *) +(* return *) +(* Changed specifically 17! *) +(* *) + (* fun flush id = *) (* res <- oneOrNoRows (SELECT tab.Val *) (* FROM tab *) diff --git a/caching-tests/test.urp b/caching-tests/test.urp index 55b0bed7..62041bdd 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -1,7 +1,8 @@ database test.db sql test.sql safeGet Test/flush -safeGet Test/flush17 -minHeap 4096 +safeGet Test/flash +safeGet Test/floosh +# safeGet Test/flush17 test diff --git a/caching-tests/test.urs b/caching-tests/test.urs index fc23c47d..ebe6bf56 100644 --- a/caching-tests/test.urs +++ b/caching-tests/test.urs @@ -1,4 +1,6 @@ val cache : int -> transaction page -val cache2 : int -> int -> transaction page +(* val cache2 : int -> int -> transaction page *) val flush : int -> transaction page -val flush17 : transaction page +val flash : int -> transaction page +val floosh : int -> transaction page +(* val flush17 : transaction page *) diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 7a7358f0..7b3a5225 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache :> SQLCACHE = struct +structure Sqlcache (* DEBUG :> SQLCACHE *) = struct open Mono @@ -567,6 +567,12 @@ end = struct end +(* DEBUG *) +val gunk0 : ((Sql.cmp * Sql.sqexp * Sql.sqexp) formula + * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula) list ref = ref [] +val gunk1 : (Sql.cmp * atomExp option * atomExp option) list list list ref = ref [] +val gunk2 : exp list ref = ref [] + structure UF = UnionFindFn(AtomExpKey) val rec sqexpToFormula = @@ -579,18 +585,22 @@ val rec sqexpToFormula = (* ASK: any other sqexps that can be props? 
*) | _ => raise Match +fun mapSqexpFields f = + fn Sql.Field (t, v) => f (t, v) + | Sql.SqNot e => Sql.SqNot (mapSqexpFields f e) + | Sql.Binop (r, e1, e2) => Sql.Binop (r, mapSqexpFields f e1, mapSqexpFields f e2) + | Sql.SqKnown e => Sql.SqKnown (mapSqexpFields f e) + | Sql.SqFunc (s, e) => Sql.SqFunc (s, mapSqexpFields f e) + | e => e + fun renameTables tablePairs = let - fun renameString table = + fun rename table = case List.find (fn (_, t) => table = t) tablePairs of NONE => table | SOME (realTable, _) => realTable - val renameSqexp = - fn Sql.Field (table, field) => Sql.Field (renameString table, field) - | e => e - (* fun renameAtom (cmp, e1, e2) = (cmp, renameSqexp e1, renameSqexp e2) *) in - mapFormulaExps renameSqexp + mapSqexpFields (fn (t, f) => Sql.Field (rename t, f)) end fun queryToFormula marker = @@ -598,26 +608,25 @@ fun queryToFormula marker = let val fWhere = case wher of NONE => Combo (Conj, []) - | SOME e => sqexpToFormula e + | SOME e => sqexpToFormula (renameTables tablePairs e) in - renameTables tablePairs - (case marker of - NONE => fWhere - | SOME markFields => - let - val fWhereMarked = mapFormulaExps markFields fWhere - val toSqexp = - fn Sql.SqField tf => Sql.Field tf - | Sql.SqExp (se, _) => se - fun ineq se = Atom (Sql.Ne, se, markFields se) - val fIneqs = Combo (Disj, map (ineq o toSqexp) sitems) - in - (Combo (Conj, - [fWhere, - Combo (Disj, - [Negate fWhereMarked, - Combo (Conj, [fWhereMarked, fIneqs])])])) - end) + case marker of + NONE => fWhere + | SOME markFields => + let + val fWhereMarked = mapFormulaExps markFields fWhere + val toSqexp = + fn Sql.SqField tf => Sql.Field tf + | Sql.SqExp (se, _) => se + fun ineq se = Atom (Sql.Ne, se, markFields se) + val fIneqs = Combo (Disj, map (ineq o renameTables tablePairs o toSqexp) sitems) + in + (Combo (Conj, + [fWhere, + Combo (Disj, + [Negate fWhereMarked, + Combo (Conj, [fWhereMarked, fIneqs])])])) + end end | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula marker q1, queryToFormula marker q2]) @@ -629,70 +638,33 @@ fun valsToFormula (markLeft, markRight) (table, vals) = (* TODO: verify logic for insertion and deletion. *) val rec dmlToFormulaMarker = fn Sql.Insert (table, vals) => (valsToFormula (id, id) (table, vals), NONE) - | Sql.Delete (table, wher) => (renameTables [(table, "T")] (sqexpToFormula wher), NONE) + | Sql.Delete (table, wher) => (sqexpToFormula (renameTables [(table, "T")] wher), NONE) | Sql.Update (table, vals, wher) => let - val fWhere = sqexpToFormula wher + val fWhere = sqexpToFormula (renameTables [(table, "T")] wher) fun fVals marks = valsToFormula marks (table, vals) val modifiedFields = SS.addList (SS.empty, map #1 vals) (* TODO: don't use field name hack. *) - fun markFields table = - fn e as Sql.Field (t, v) => if t = table andalso SS.member (modifiedFields, v) - then Sql.Field (t, v ^ "'") - else e - | Sql.SqNot e => Sql.SqNot (markFields table e) - | Sql.Binop (r, e1, e2) => Sql.Binop (r, markFields table e1, markFields table e2) - | Sql.SqKnown e => Sql.SqKnown (markFields table e) - | Sql.SqFunc (s, e) => Sql.SqFunc (s, markFields table e) - | e => e - val mark = mapFormulaExps (markFields "T") + val markFields = + mapSqexpFields (fn (t, v) => if t = table andalso SS.member (modifiedFields, v) + then ((* DEBUG *) print ("yep" ^ Int.toString (length (!gunk0))); + Sql.Field (t, v ^ "'")) + else ((* DEBUG *) print (table ^ " " ^ t ^ "\n"); Sql.Field (t, v))) + val mark = mapFormulaExps markFields in - (* Inside renameTables, we mark with table "T". 
Outside, we use the real table name. *) - (renameTables [(table, "T")] - (Combo (Disj, [Combo (Conj, [fVals (id, markFields "T"), mark fWhere]), - Combo (Conj, [fVals (markFields "T", id), fWhere])])), - SOME (markFields table)) + ((Combo (Disj, [Combo (Conj, [fVals (id, markFields), mark fWhere]), + Combo (Conj, [fVals (markFields, id), fWhere])])), + SOME markFields) end fun pairToFormulas (query, dml) = let - val (fDml, marker) = dmlToFormulaMarker dml + val (fDml, marker) = ((* DEBUG *) print "dml\n"; dmlToFormulaMarker dml) in + (* DEBUG *) print "query\n"; (queryToFormula marker query, fDml) end -(* structure ToFormula = struct *) - -(* val testOfQuery : Sql.query1 -> (Sql.cmp * Sql.sqexp * Sql.sqexp) formula = *) -(* fn {From = tablePairs, Where = SOME e, ...} => renameTables tablePairs (sqexpToFormula e) *) -(* | {Where = NONE, ...} => Combo (Conj, []) *) - -(* (* If selecting some parsable subset of fields, says which ones. [NONE] *) -(* means anything could be selected. *) *) -(* fun fieldsOfQuery (q : Sql.query1) = *) -(* osequence (map (fn Sql.SqField tf => SOME tf *) -(* | Sql.SqExp (Sql.Field tf) => SOME tf *) -(* | _ => NONE) (#Select q)) *) - -(* fun fieldsOfVals (table, vals, wher) = *) -(* let *) -(* val fWhere = renameTables [(table, "T")] (sqexpToFormula wher) *) -(* val fVals = valsToFormula (table, vals) *) -(* val modifiedFields = SS.addList (SS.empty, map #1 vals) *) -(* (* TODO: don't use field name hack. *) *) -(* val markField = *) -(* fn e as Sql.Field (t, v) => if SS.member (modifiedFields, v) *) -(* then Sql.Field (t, v ^ "'") *) -(* else e *) -(* | e => e *) -(* val mark = mapFormula (fn (cmp, e1, e2) => (cmp, markField e1, markField e2)) *) -(* in *) -(* renameTables [(table, "T")] *) -(* (Combo (Disj, [Combo (Conj, [fVals, mark fWhere]), *) -(* Combo (Conj, [mark fVals, fWhere])])) *) -(* end *) -(* end *) - structure ConflictMaps = struct structure TK = TripleKeyFn(structure I = CmpKey @@ -716,7 +688,7 @@ structure ConflictMaps = struct atoms val contradiction = fn (cmp, SOME ae1, SOME ae2) => (cmp = Sql.Ne orelse cmp = Sql.Lt orelse cmp = Sql.Gt) - andalso not (UF.together (uf, ae1, ae2)) + andalso UF.together (uf, ae1, ae2) (* If we don't know one side of the comparision, not a contradiction. 
*) | _ => false in @@ -814,7 +786,9 @@ structure ConflictMaps = struct val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass) o List.mapPartial equivClasses + o (fn x => (gunk1 := x :: !gunk1; x)) o dnf + o (fn x => (gunk0 := x :: !gunk0; x)) end @@ -1317,10 +1291,6 @@ end val invalidations = Invalidations.invalidations -(* DEBUG *) -(* val gunk : ((Sql.query * int) * Sql.dml) list ref = ref [] *) -(* val gunk' : exp list ref = ref [] *) - fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state), effs) = let val flushes = List.concat @@ -1329,7 +1299,7 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state fn dmlExp as EDml (dmlText, failureMode) => let (* DEBUG *) - (* val () = gunk' := origDmlText :: !gunk' *) + (* val () = gunk2 := dmlText :: !gunk2 *) (* val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) val inval = case Sql.parse Sql.dml dmlText of @@ -1352,8 +1322,6 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state val file = fileMap doExp file in - (* DEBUG *) - (* gunk := []; *) ffiInfoRef := ffiInfo; file end -- cgit v1.2.3 From aa2c8c64542d7930773da26573e186ec3753c268 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 9 Nov 2015 13:37:31 -0500 Subject: Progress on free paths, but consolidation seems to fail more with them. --- caching-tests/test.ur | 18 +++- caching-tests/test.urp | 1 + caching-tests/test.urs | 1 + src/sources | 3 + src/sqlcache.sml | 222 ++++++++++++++++++++++++++++++++++--------------- 5 files changed, 177 insertions(+), 68 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index e08c6e47..0549840d 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -5,7 +5,23 @@ fun cache id = FROM tab WHERE tab.Id = {[id]}); return - Reading {[id]}. + (* Reading {[id]}. *) + {case res of + None => ? + | Some row => {[row.Tab.Val]}} + + +(* fun sillyRecursive {Id = id, FooBar = fooBar} = *) +(* if fooBar <= 0 *) +(* then 0 *) +(* else 1 + sillyRecursive {Id = id, FooBar = fooBar - 1} *) + +fun cacheR (r : {Id : int, FooBar : int}) = + res <- oneOrNoRows (SELECT tab.Val + FROM tab + WHERE tab.Id = {[r.Id]}); + return + (* Reading {[r.Id]}. *) {case res of None => ? 
| Some row => {[row.Tab.Val]}} diff --git a/caching-tests/test.urp b/caching-tests/test.urp index 62041bdd..cea8821e 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -4,5 +4,6 @@ safeGet Test/flush safeGet Test/flash safeGet Test/floosh # safeGet Test/flush17 +minHeap 4096 test diff --git a/caching-tests/test.urs b/caching-tests/test.urs index ebe6bf56..1fa5a9c2 100644 --- a/caching-tests/test.urs +++ b/caching-tests/test.urs @@ -1,4 +1,5 @@ val cache : int -> transaction page +val cacheR : {Id : int, FooBar : int} -> transaction page (* val cache2 : int -> int -> transaction page *) val flush : int -> transaction page val flash : int -> transaction page diff --git a/src/sources b/src/sources index 1303b46e..8bf80bc6 100644 --- a/src/sources +++ b/src/sources @@ -176,7 +176,10 @@ $(SRC)/sql.sml $(SRC)/union_find_fn.sml $(SRC)/multimap_fn.sml + +$(SRC)/list_key_fn.sml $(SRC)/option_key_fn.sml +$(SRC)/pair_key_fn.sml $(SRC)/triple_key_fn.sml $(SRC)/cache.sml diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 7b3a5225..ce383f18 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -2,6 +2,7 @@ structure Sqlcache (* DEBUG :> SQLCACHE *) = struct open Mono +structure IK = struct type ord_key = int val compare = Int.compare end structure IS = IntBinarySet structure IM = IntBinaryMap structure SK = struct type ord_key = string val compare = String.compare end @@ -330,11 +331,89 @@ val freeVars = 0 IS.empty +(* A path is a number of field projections of a variable. *) +structure PK = PairKeyFn(structure I = IK structure J = ListKeyFn(SK)) +structure PS = BinarySetFn(PK) + +(* DEBUG *) +val gunk3 : (PS.set * PS.set) list ref = ref [] +val gunk4 : (PS.set * PS.set) list ref = ref [] + +val pathOfExp = + let + fun readFields acc exp = + acc + <\obind\> + (fn fs => + case #1 exp of + ERel n => SOME (n, fs) + | EField (exp, f) => readFields (SOME (f::fs)) exp + | _ => NONE) + in + readFields (SOME []) + end + +fun expOfPath (n, fs) = + List.foldl (fn (f, exp) => (EField (exp, f), dummyLoc)) (ERel n, dummyLoc) fs + +fun freePaths'' bound exp paths = + case pathOfExp (exp, dummyLoc) of + NONE => paths + | SOME (n, fs) => if n < bound then paths else PS.add (paths, (n - bound, fs)) + +(* ASK: nicer way? 
:( *) +fun freePaths' bound exp = + case #1 exp of + EPrim _ => id + | e as ERel _ => freePaths'' bound e + | ENamed _ => id + | ECon (_, _, data) => (case data of NONE => id | SOME e => freePaths' bound e) + | ENone _ => id + | ESome (_, e) => freePaths' bound e + | EFfi _ => id + | EFfiApp (_, _, args) => + List.foldl (fn ((e, _), acc) => freePaths' bound e o acc) id args + | EApp (e1, e2) => freePaths' bound e1 o freePaths' bound e2 + | EAbs (_, _, _, e) => freePaths' (bound + 1) e + | EUnop (_, e) => freePaths' bound e + | EBinop (_, _, e1, e2) => freePaths' bound e1 o freePaths' bound e2 + | ERecord fields => List.foldl (fn ((_, e, _), acc) => freePaths' bound e o acc) id fields + | e as EField _ => freePaths'' bound e + | ECase (e, cases, _) => + List.foldl (fn ((p, e), acc) => freePaths' (bound + MonoEnv.patBindsN p) e o acc) + (freePaths' bound e) + cases + | EStrcat (e1, e2) => freePaths' bound e1 o freePaths' bound e2 + | EError (e, _) => freePaths' bound e + | EReturnBlob {blob, mimeType = e, ...} => + freePaths' bound e o (case blob of NONE => id | SOME e => freePaths' bound e) + | ERedirect (e, _) => freePaths' bound e + | EWrite e => freePaths' bound e + | ESeq (e1, e2) => freePaths' bound e1 o freePaths' bound e2 + | ELet (_, _, e1, e2) => freePaths' (bound + 1) e1 o freePaths' bound e2 + | EClosure (_, es) => List.foldl (fn (e, acc) => freePaths' bound e o acc) id es + | EQuery {query = e1, body = e2, initial = e3, ...} => + freePaths' bound e1 o freePaths' (bound + 2) e2 o freePaths' bound e3 + | EDml (e, _) => freePaths' bound e + | ENextval e => freePaths' bound e + | ESetval (e1, e2) => freePaths' bound e1 o freePaths' bound e2 + | EUnurlify (e, _, _) => freePaths' bound e + | EJavaScript (_, e) => freePaths' bound e + | ESignalReturn e => freePaths' bound e + | ESignalBind (e1, e2) => freePaths' bound e1 o freePaths' bound e2 + | ESignalSource e => freePaths' bound e + | EServerCall (e, _, _, _) => freePaths' bound e + | ERecv (e, _) => freePaths' bound e + | ESleep e => freePaths' bound e + | ESpawn e => freePaths' bound e + +fun freePaths exp = freePaths' 0 exp PS.empty + datatype unbind = Known of exp | Unknowns of int datatype cacheArg = AsIs of exp | Urlify of exp -structure InvalInfo :> sig +structure InvalInfo (* DEBUG :> sig type t type state = {tableToIndices : SIMM.multimap, indexToInvalInfo : (t * int) IntBinaryMap.map, @@ -347,9 +426,10 @@ structure InvalInfo :> sig val unbind : t * unbind -> t option val union : t * t -> t val updateState : t * int * state -> state -end = struct +end *) = struct - datatype sqlArg = FreeVar of int | Sqlify of string * string * sqlArg * typ + (* Variable, field projections, possible wrapped sqlification FFI call. *) + type sqlArg = int * string list * (string * string * typ) option type subst = sqlArg IM.map @@ -361,24 +441,14 @@ end = struct ffiInfo : {index : int, params : int} list, index : int} - structure AM = BinaryMapFn(struct - type ord_key = sqlArg - (* Saw this on MLton wiki. 
*) - fun ifNotEq (cmp, thunk) = case cmp of - EQUAL => thunk () - | _ => cmp - fun try f x () = f x - val rec compare = - fn (FreeVar n1, FreeVar n2) => - Int.compare (n1, n2) - | (FreeVar _, _) => LESS - | (_, FreeVar _) => GREATER - | (Sqlify (m1, x1, arg1, t1), Sqlify (m2, x2, arg2, t2)) => - String.compare (m1, m2) - <\ifNotEq\> try String.compare (x1, x2) - <\ifNotEq\> try MonoUtil.Typ.compare (t1, t2) - <\ifNotEq\> try compare (arg1, arg2) - end) + structure AK = TripleKeyFn( + structure I = IK + structure J = ListKeyFn(SK) + structure K = OptionKeyFn(TripleKeyFn( + structure I = SK + structure J = SK + structure K = struct type ord_key = Mono.typ val compare = MonoUtil.Typ.compare end))) + structure AM = BinaryMapFn(AK) (* Traversal Utilities *) (* TODO: get rid of unused ones. *) @@ -423,9 +493,21 @@ end = struct fun traverseSubst (ops as (_, pure', lift, _, _, _, lift2')) f = let - val rec mp = - fn FreeVar n => f n - | Sqlify (m, x, arg, t) => lift (fn mparg => Sqlify (m, x, mparg, t)) (mp arg) + fun mp (n, fields, sqlify) = + lift (fn (n', fields', sqlify') => + let + fun wrap sq = (n', fields' @ fields, sq) + in + case (fields', sqlify', fields, sqlify) of + (_, NONE, _, NONE) => wrap NONE + | (_, NONE, _, sq as SOME _) => wrap sq + (* Last case should suffice because we don't + project from a sqlified value (which is a + string). *) + | (_, sq as SOME _, [], NONE) => wrap sq + | _ => raise Match + end) + (f n) in traverseIM ops (fn (_, v) => mp v) end @@ -447,7 +529,7 @@ end = struct IS.empty (fn e' => freeVars (e', dummyLoc)) - val varsOfSubst = foldMapSubst IS.union IS.empty IS.singleton + fun varsOfSubst subst = foldMapSubst IS.union IS.empty IS.singleton subst val varsOfList = fn [] => IS.empty @@ -457,7 +539,7 @@ end = struct val empty = [] - fun singleton q = [(q, IS.foldl (fn (n, acc) => IM.insert (acc, n, FreeVar n)) + fun singleton q = [(q, IS.foldl (fn (n, acc) => IM.insert (acc, n, (n, [], NONE))) IM.empty (varsOfQuery q))] @@ -477,21 +559,30 @@ end = struct AM.map count args end - val rec expOfArg = - fn FreeVar n => (ERel n, dummyLoc) - | Sqlify (m, x, arg, t) => (EFfiApp (m, x, [(expOfArg arg, t)]), dummyLoc) + fun expOfArg (n, fields, sqlify) = + let + val exp = List.foldl (fn (field, exp) => (EField (exp, field), dummyLoc)) + (ERel n, dummyLoc) + fields + in + case sqlify of + NONE => exp + | SOME (m, x, typ) => (EFfiApp (m, x, [(exp, typ)]), dummyLoc) + end - fun orderArgs (qs : t, vars) = + fun orderArgs (qs : t, paths) = let fun erel n = (ERel n, dummyLoc) val argsMap = sqlArgsMap qs val args = map (expOfArg o #1) (AM.listItemsi argsMap) - val invalVars = List.foldl IS.union IS.empty (map freeVars args) + val invalPaths = List.foldl PS.union PS.empty (map freePaths args) + (* DEBUG *) + val () = gunk3 := (paths, invalPaths) :: !gunk3 in (* Put arguments we might invalidate by first. *) map AsIs args (* TODO: make sure these variables are okay to remove from the argument list. 
*) - @ map (Urlify o erel) (IS.listItems (IS.difference (vars, invalVars))) + @ map (Urlify o expOfPath) (PS.listItems (PS.difference (paths, invalPaths))) end (* As a kludge, we rename the variables in the query to correspond to the @@ -527,13 +618,23 @@ end = struct | [] => raise Match end - val rec argOfExp = - fn (ERel n, _) => SOME (FreeVar n) - | (EFfiApp ("Basis", x, [(exp, t)]), _) => - if String.isPrefix "sqlify" x - then omap (fn arg => Sqlify ("Basis", x, arg, t)) (argOfExp exp) - else NONE - | _ => NONE + val argOfExp = + let + fun doFields acc exp = + acc + <\obind\> + (fn (fs, sqlify) => + case #1 exp of + ERel n => SOME (n, fs, sqlify) + | EField (exp, f) => doFields (SOME (f::fs, sqlify)) exp + | _ => NONE) + in + fn (EFfiApp ("Basis", x, [(exp, typ)]), _) => + if String.isPrefix "sqlify" x + then doFields (SOME ([], SOME ("Basis", x, typ))) exp + else NONE + | exp => doFields (SOME ([], NONE)) exp + end val unbind1 = fn Known e => @@ -541,9 +642,9 @@ end = struct val replacement = argOfExp e in omapSubst (fn 0 => replacement - | n => SOME (FreeVar (n-1))) + | n => SOME (n-1, [], NONE)) end - | Unknowns k => omapSubst (fn n => if n >= k then NONE else SOME (FreeVar (n-k))) + | Unknowns k => omapSubst (fn n => if n < k then NONE else SOME (n-k, [], NONE)) fun unbind (qs, ub) = case ub of @@ -647,9 +748,8 @@ val rec dmlToFormulaMarker = (* TODO: don't use field name hack. *) val markFields = mapSqexpFields (fn (t, v) => if t = table andalso SS.member (modifiedFields, v) - then ((* DEBUG *) print ("yep" ^ Int.toString (length (!gunk0))); - Sql.Field (t, v ^ "'")) - else ((* DEBUG *) print (table ^ " " ^ t ^ "\n"); Sql.Field (t, v))) + then Sql.Field (t, v ^ "'") + else Sql.Field (t, v)) val mark = mapFormulaExps markFields in ((Combo (Disj, [Combo (Conj, [fVals (id, markFields), mark fWhere]), @@ -659,9 +759,8 @@ val rec dmlToFormulaMarker = fun pairToFormulas (query, dml) = let - val (fDml, marker) = ((* DEBUG *) print "dml\n"; dmlToFormulaMarker dml) + val (fDml, marker) = dmlToFormulaMarker dml in - (* DEBUG *) print "query\n"; (queryToFormula marker query, fDml) end @@ -993,7 +1092,7 @@ fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = | EClosure _ => NONE | EUnurlify (_, t, _) => SOME t | EQuery {state, ...} => SOME state - | _ => NONE + | e => NONE and typOfExp env (e', loc) = typOfExp' env e' @@ -1002,22 +1101,6 @@ and typOfExp env (e', loc) = typOfExp' env e' (* Caching *) (***********) -(* - -To get the invalidations for a dml, we need (each <- is list-monad-y): - * table <- dml - * cache <- table - * query <- cache - * inval <- (query, dml), -where inval is a list of query argument indices, so - * way to change query args in inval to cache args. -For now, the last one is just - * a map from query arg number to the corresponding free variable (per query) - * a map from free variable to cache arg number (per cache). -Both queries and caches should have IDs. - -*) - type state = InvalInfo.state datatype subexp = Cachable of InvalInfo.t * (state -> exp * state) | Impure of exp @@ -1062,7 +1145,7 @@ fun cacheWrap (env, exp, typ, args, index) = val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 (* TODO: pick a number. 
*) -val sizeWorthCaching = 5 +val sizeWorthCaching = ~1 val worthCaching = fn EQuery _ => true @@ -1074,7 +1157,7 @@ fun cacheExp (env, exp', invalInfo, state : state) = | SOME (TFun _, _) => NONE | SOME typ => let - val args = InvalInfo.orderArgs (invalInfo, freeVars (exp', dummyLoc)) + val args = InvalInfo.orderArgs (invalInfo, freePaths (exp', dummyLoc)) val numArgs = length args in (List.foldr (fn (arg, acc) => acc @@ -1135,7 +1218,12 @@ fun cacheQuery (effs, env, q) : subexp = | SOME subexp => subexp end -fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = +(* DEBUG *) +(* fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = *) +(* (Print.preface ("cacheTree> ", MonoPrint.p_exp MonoEnv.empty exp); *) +(* cacheTree' effs ((env, exp), state)) *) + +and cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = let fun wrapBindN (f : exp list -> exp') (args : ((MonoEnv.env * exp) * unbind) list) = @@ -1300,7 +1388,7 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state let (* DEBUG *) (* val () = gunk2 := dmlText :: !gunk2 *) - (* val () = Print.preface ("SQLCACHE: ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) + (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) val inval = case Sql.parse Sql.dml dmlText of SOME dmlParsed => -- cgit v1.2.3 From 91e5530ace053b1b9ea3a1fe64ce638ae6493314 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Mon, 9 Nov 2015 13:38:04 -0500 Subject: Add new files missing from previous commit. --- src/list_key_fn.sml | 14 ++++++++++++++ src/pair_key_fn.sml | 12 ++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 src/list_key_fn.sml create mode 100644 src/pair_key_fn.sml (limited to 'src') diff --git a/src/list_key_fn.sml b/src/list_key_fn.sml new file mode 100644 index 00000000..ec2bd26f --- /dev/null +++ b/src/list_key_fn.sml @@ -0,0 +1,14 @@ +functor ListKeyFn(K : ORD_KEY) + : ORD_KEY where type ord_key = K.ord_key list = struct + +type ord_key = K.ord_key list + +val rec compare = + fn ([], []) => EQUAL + | ([], _) => LESS + | (_, []) => GREATER + | (x::xs, y::ys) => case K.compare (x, y) of + EQUAL => compare (xs, ys) + | ord => ord + +end diff --git a/src/pair_key_fn.sml b/src/pair_key_fn.sml new file mode 100644 index 00000000..cd33950d --- /dev/null +++ b/src/pair_key_fn.sml @@ -0,0 +1,12 @@ +functor PairKeyFn (structure I : ORD_KEY + structure J : ORD_KEY) + : ORD_KEY where type ord_key = I.ord_key * J.ord_key = struct + +type ord_key = I.ord_key * J.ord_key + +fun compare ((i1, j1), (i2, j2)) = + case I.compare (i1, i2) of + EQUAL => J.compare (j1, j2) + | ord => ord + +end -- cgit v1.2.3 From b7d668bb4647c4216df7120b4b8f8d5c6e8257f0 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 10 Nov 2015 12:35:00 -0500 Subject: Fix bug in and clean up free path code. --- caching-tests/test.ur | 12 ++-- src/sqlcache.sml | 151 +++++++++++++++++++++++++------------------------- 2 files changed, 81 insertions(+), 82 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index 0549840d..cbfde556 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -5,23 +5,23 @@ fun cache id = FROM tab WHERE tab.Id = {[id]}); return - (* Reading {[id]}. *) + cache {case res of None => ? 
| Some row => {[row.Tab.Val]}} -(* fun sillyRecursive {Id = id, FooBar = fooBar} = *) -(* if fooBar <= 0 *) -(* then 0 *) -(* else 1 + sillyRecursive {Id = id, FooBar = fooBar - 1} *) +fun sillyRecursive {Id = id : int, FooBar = fooBar} = + if fooBar <= 0 + then 0 + else 1 + sillyRecursive {Id = id, FooBar = fooBar - 1} fun cacheR (r : {Id : int, FooBar : int}) = res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[r.Id]}); return - (* Reading {[r.Id]}. *) + cacheR {[r.FooBar]} {case res of None => ? | Some row => {[row.Tab.Val]}} diff --git a/src/sqlcache.sml b/src/sqlcache.sml index ce383f18..5a748496 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache (* DEBUG :> SQLCACHE *) = struct +structure Sqlcache :> SQLCACHE = struct open Mono @@ -51,9 +51,13 @@ val ffiEffectful = andalso not (m = "Basis" andalso SS.member (okayWrites, f)) end -val cache = ref LruCache.cache -fun setCache c = cache := c -fun getCache () = !cache +val cacheRef = ref LruCache.cache +fun setCache c = cacheRef := c +fun getCache () = !cacheRef + +val alwaysConsolidateRef = ref true +fun setAlwaysConsolidate b = alwaysConsolidateRef := b +fun getAlwaysConsolidate () = !alwaysConsolidateRef (* Used to have type context for local variables in MonoUtil functions. *) val doBind = @@ -63,6 +67,17 @@ val doBind = val dummyLoc = ErrorMsg.dummySpan +(* DEBUG *) +fun printExp msg exp = Print.preface ("SQLCACHE: " ^ msg ^ ":", MonoPrint.p_exp MonoEnv.empty exp) +fun printExp' msg exp' = printExp msg (exp', dummyLoc) +fun printTyp msg typ = Print.preface ("SQLCACHE: " ^ msg ^ ":", MonoPrint.p_typ MonoEnv.empty typ) +fun printTyp' msg typ' = printTyp msg (typ', dummyLoc) +fun obindDebug printer (x, f) = + case x of + NONE => NONE + | SOME x' => case f x' of + NONE => (printer (); NONE) + | y => y (*********************) (* General Utilities *) @@ -332,13 +347,10 @@ val freeVars = IS.empty (* A path is a number of field projections of a variable. 
*) +type path = int * string list structure PK = PairKeyFn(structure I = IK structure J = ListKeyFn(SK)) structure PS = BinarySetFn(PK) -(* DEBUG *) -val gunk3 : (PS.set * PS.set) list ref = ref [] -val gunk4 : (PS.set * PS.set) list ref = ref [] - val pathOfExp = let fun readFields acc exp = @@ -380,7 +392,7 @@ fun freePaths' bound exp = | ERecord fields => List.foldl (fn ((_, e, _), acc) => freePaths' bound e o acc) id fields | e as EField _ => freePaths'' bound e | ECase (e, cases, _) => - List.foldl (fn ((p, e), acc) => freePaths' (bound + MonoEnv.patBindsN p) e o acc) + List.foldl (fn ((p, e), acc) => freePaths' (MonoEnv.patBindsN p + bound) e o acc) (freePaths' bound e) cases | EStrcat (e1, e2) => freePaths' bound e1 o freePaths' bound e2 @@ -390,7 +402,7 @@ fun freePaths' bound exp = | ERedirect (e, _) => freePaths' bound e | EWrite e => freePaths' bound e | ESeq (e1, e2) => freePaths' bound e1 o freePaths' bound e2 - | ELet (_, _, e1, e2) => freePaths' (bound + 1) e1 o freePaths' bound e2 + | ELet (_, _, e1, e2) => freePaths' bound e1 o freePaths' (bound + 1) e2 | EClosure (_, es) => List.foldl (fn (e, acc) => freePaths' bound e o acc) id es | EQuery {query = e1, body = e2, initial = e3, ...} => freePaths' bound e1 o freePaths' (bound + 2) e2 o freePaths' bound e3 @@ -413,7 +425,7 @@ datatype unbind = Known of exp | Unknowns of int datatype cacheArg = AsIs of exp | Urlify of exp -structure InvalInfo (* DEBUG :> sig +structure InvalInfo :> sig type t type state = {tableToIndices : SIMM.multimap, indexToInvalInfo : (t * int) IntBinaryMap.map, @@ -422,14 +434,14 @@ structure InvalInfo (* DEBUG :> sig val empty : t val singleton : Sql.query -> t val query : t -> Sql.query - val orderArgs : t * IS.set -> cacheArg list + val orderArgs : t * Mono.exp -> cacheArg list val unbind : t * unbind -> t option val union : t * t -> t val updateState : t * int * state -> state -end *) = struct +end = struct (* Variable, field projections, possible wrapped sqlification FFI call. 
*) - type sqlArg = int * string list * (string * string * typ) option + type sqlArg = path * (string * string * typ) option type subst = sqlArg IM.map @@ -441,10 +453,9 @@ end *) = struct ffiInfo : {index : int, params : int} list, index : int} - structure AK = TripleKeyFn( - structure I = IK - structure J = ListKeyFn(SK) - structure K = OptionKeyFn(TripleKeyFn( + structure AK = PairKeyFn( + structure I = PK + structure J = OptionKeyFn(TripleKeyFn( structure I = SK structure J = SK structure K = struct type ord_key = Mono.typ val compare = MonoUtil.Typ.compare end))) @@ -493,10 +504,10 @@ end *) = struct fun traverseSubst (ops as (_, pure', lift, _, _, _, lift2')) f = let - fun mp (n, fields, sqlify) = - lift (fn (n', fields', sqlify') => + fun mp ((n, fields), sqlify) = + lift (fn ((n', fields'), sqlify') => let - fun wrap sq = (n', fields' @ fields, sq) + fun wrap sq = ((n', fields' @ fields), sq) in case (fields', sqlify', fields, sqlify) of (_, NONE, _, NONE) => wrap NONE @@ -539,7 +550,7 @@ end *) = struct val empty = [] - fun singleton q = [(q, IS.foldl (fn (n, acc) => IM.insert (acc, n, (n, [], NONE))) + fun singleton q = [(q, IS.foldl (fn (n, acc) => IM.insert (acc, n, ((n, []), NONE))) IM.empty (varsOfQuery q))] @@ -559,25 +570,22 @@ end *) = struct AM.map count args end - fun expOfArg (n, fields, sqlify) = + fun expOfArg (path, sqlify) = let - val exp = List.foldl (fn (field, exp) => (EField (exp, field), dummyLoc)) - (ERel n, dummyLoc) - fields + val exp = expOfPath path in case sqlify of NONE => exp | SOME (m, x, typ) => (EFfiApp (m, x, [(exp, typ)]), dummyLoc) end - fun orderArgs (qs : t, paths) = + fun orderArgs (qs : t, exp) = let + val paths = freePaths exp fun erel n = (ERel n, dummyLoc) val argsMap = sqlArgsMap qs val args = map (expOfArg o #1) (AM.listItemsi argsMap) val invalPaths = List.foldl PS.union PS.empty (map freePaths args) - (* DEBUG *) - val () = gunk3 := (paths, invalPaths) :: !gunk3 in (* Put arguments we might invalidate by first. *) map AsIs args @@ -631,9 +639,9 @@ end *) = struct in fn (EFfiApp ("Basis", x, [(exp, typ)]), _) => if String.isPrefix "sqlify" x - then doFields (SOME ([], SOME ("Basis", x, typ))) exp + then omap (fn path => (path, SOME ("Basis", x, typ))) (pathOfExp exp) else NONE - | exp => doFields (SOME ([], NONE)) exp + | exp => omap (fn path => (path, NONE)) (pathOfExp exp) end val unbind1 = @@ -642,9 +650,9 @@ end *) = struct val replacement = argOfExp e in omapSubst (fn 0 => replacement - | n => SOME (n-1, [], NONE)) + | n => SOME ((n-1, []), NONE)) end - | Unknowns k => omapSubst (fn n => if n < k then NONE else SOME (n-k, [], NONE)) + | Unknowns k => omapSubst (fn n => if n < k then NONE else SOME ((n-k, []), NONE)) fun unbind (qs, ub) = case ub of @@ -668,12 +676,6 @@ end *) = struct end -(* DEBUG *) -val gunk0 : ((Sql.cmp * Sql.sqexp * Sql.sqexp) formula - * (Sql.cmp * Sql.sqexp * Sql.sqexp) formula) list ref = ref [] -val gunk1 : (Sql.cmp * atomExp option * atomExp option) list list list ref = ref [] -val gunk2 : exp list ref = ref [] - structure UF = UnionFindFn(AtomExpKey) val rec sqexpToFormula = @@ -885,9 +887,7 @@ structure ConflictMaps = struct val conflictMaps = List.mapPartial (mergeEqs o map eqsOfClass) o List.mapPartial equivClasses - o (fn x => (gunk1 := x :: !gunk1; x)) o dnf - o (fn x => (gunk0 := x :: !gunk0; x)) end @@ -1145,41 +1145,50 @@ fun cacheWrap (env, exp, typ, args, index) = val expSize = MonoUtil.Exp.fold {typ = #2, exp = fn (_, n) => n+1} 0 (* TODO: pick a number. 
*) -val sizeWorthCaching = ~1 +val sizeWorthCaching = 5 val worthCaching = fn EQuery _ => true | exp' => expSize (exp', dummyLoc) > sizeWorthCaching +fun shouldConsolidate args = + let + val isAsIs = fn AsIs _ => true | Urlify _ => false + in + getAlwaysConsolidate () + orelse not (List.exists isAsIs args andalso List.exists (not o isAsIs) args) + end + fun cacheExp (env, exp', invalInfo, state : state) = case worthCaching exp' <\oguard\> typOfExp' env exp' of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => let - val args = InvalInfo.orderArgs (invalInfo, freePaths (exp', dummyLoc)) - val numArgs = length args - in (List.foldr (fn (arg, acc) => - acc - <\obind\> - (fn args' => - (case arg of - AsIs exp => SOME exp - | Urlify exp => - typOfExp env exp - <\obind\> - (fn typ => - (MonoFooify.urlify env (exp, typ)))) - <\obind\> - (fn arg' => SOME (arg' :: args')))) - (SOME []) - args) - <\obind\> - (fn args' => - cacheWrap (env, (exp', dummyLoc), typ, args', #index state) - <\obind\> - (fn cachedExp => - SOME (cachedExp, InvalInfo.updateState (invalInfo, numArgs, state)))) + val args = InvalInfo.orderArgs (invalInfo, (exp', dummyLoc)) + in + shouldConsolidate args + <\oguard\> + List.foldr (fn (arg, acc) => + acc + <\obind\> + (fn args' => + (case arg of + AsIs exp => SOME exp + | Urlify exp => + typOfExp env exp + <\obind\> + (fn typ => (MonoFooify.urlify env (exp, typ)))) + <\obind\> + (fn arg' => SOME (arg' :: args')))) + (SOME []) + args + <\obind\> + (fn args' => + cacheWrap (env, (exp', dummyLoc), typ, args', #index state) + <\obind\> + (fn cachedExp => + SOME (cachedExp, InvalInfo.updateState (invalInfo, length args', state)))) end fun cacheQuery (effs, env, q) : subexp = @@ -1194,8 +1203,6 @@ fun cacheQuery (effs, env, q) : subexp = bound env) val {query = queryText, initial, body, ...} = q - (* DEBUG *) - (* val () = Print.preface ("sqlcache> ", MonoPrint.p_exp MonoEnv.empty queryText) *) val attempt = (* Ziv misses Haskell's do notation.... *) (safe 0 queryText andalso safe 0 initial andalso safe 2 body) @@ -1218,12 +1225,7 @@ fun cacheQuery (effs, env, q) : subexp = | SOME subexp => subexp end -(* DEBUG *) -(* fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = *) -(* (Print.preface ("cacheTree> ", MonoPrint.p_exp MonoEnv.empty exp); *) -(* cacheTree' effs ((env, exp), state)) *) - -and cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = +fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = let fun wrapBindN (f : exp list -> exp') (args : ((MonoEnv.env * exp) * unbind) list) = @@ -1386,9 +1388,6 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state val doExp = fn dmlExp as EDml (dmlText, failureMode) => let - (* DEBUG *) - (* val () = gunk2 := dmlText :: !gunk2 *) - (* val () = Print.preface ("sqlcache> ", (MonoPrint.p_exp MonoEnv.empty origDmlText)) *) val inval = case Sql.parse Sql.dml dmlText of SOME dmlParsed => -- cgit v1.2.3 From 7b14b2f01fd0218c0bbe0a5c4071fff190c91ce1 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Wed, 11 Nov 2015 20:01:48 -0500 Subject: Rewrite LRU cache. Now uses one big hash table and is less buggy. 
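The rewrite keeps one flat uthash table per cache in place of the old tree of nested tables: the composite key for a cache's arguments is the string "_k1_k2..._kn", an entry is kept for every prefix of that key, and bumping an entry with HASH_DEL plus re-insertion keeps the table in LRU order, so eviction can simply drop the first (least recently used) element. A small SML sketch of the prefix-key layout that uw_Sqlcache_allocKeyBuffer and uw_Sqlcache_keyCopy build on the C side (argument order simplified here):

fun prefixKeys keys =
    let
        fun step (k, (cur, acc)) =
            let val cur' = cur ^ "_" ^ k in (cur', cur' :: acc) end
    in
        rev (#2 (List.foldl step ("", []) keys))
    end

(* prefixKeys ["17", "alice"] = ["_17", "_17_alice"].
   uw_Sqlcache_check looks up every prefix and takes the largest timeInvalid it
   sees before trusting the stored value; uw_Sqlcache_store creates any missing
   prefix entries; a flush whose trailing keys are unknown bumps timeInvalid on
   the longest known prefix, while a fully keyed flush deletes the exact entry. *)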
--- include/urweb/types_cpp.h | 29 +++--- include/urweb/urweb_cpp.h | 8 +- src/c/urweb.c | 240 ++++++++++++++++++++++++---------------------- src/lru_cache.sml | 15 +-- 4 files changed, 147 insertions(+), 145 deletions(-) (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 84423105..4847a3fd 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -123,31 +123,24 @@ typedef struct { #include "uthash.h" -typedef struct uw_Sqlcache_CacheValue { +typedef struct uw_Sqlcache_Value { char *result; char *output; -} uw_Sqlcache_CacheValue; + unsigned long timeValid; +} uw_Sqlcache_Value; -typedef struct uw_Sqlcache_CacheEntry { +typedef struct uw_Sqlcache_Entry { char *key; - void *value; - time_t timeValid; - struct uw_Sqlcache_CacheEntry *prev; - struct uw_Sqlcache_CacheEntry *next; + uw_Sqlcache_Value *value; + unsigned long timeInvalid; UT_hash_handle hh; -} uw_Sqlcache_CacheEntry; - -typedef struct uw_Sqlcache_CacheList { - uw_Sqlcache_CacheEntry *first; - uw_Sqlcache_CacheEntry *last; - int size; -} uw_Sqlcache_CacheList; +} uw_Sqlcache_Entry; typedef struct uw_Sqlcache_Cache { - uw_Sqlcache_CacheEntry *table; - time_t timeInvalid; - uw_Sqlcache_CacheList *lru; - int height; + struct uw_Sqlcache_Entry *table; + unsigned long timeInvalid; + unsigned long timeNow; + UT_hash_handle hh; } uw_Sqlcache_Cache; #endif diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index ab2a91c1..52e54372 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -405,10 +405,8 @@ void uw_Basis_writec(struct uw_context *, char); // Sqlcache. -#include "uthash.h" - -uw_Sqlcache_CacheValue *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **); -uw_Sqlcache_CacheValue *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, uw_Sqlcache_CacheValue *); -uw_Sqlcache_CacheValue *uw_Sqlcache_flush(uw_Sqlcache_Cache *, char **); +uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **, int); +void *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, int, uw_Sqlcache_Value *); +void *uw_Sqlcache_flush(uw_Sqlcache_Cache *, char **, int); #endif diff --git a/src/c/urweb.c b/src/c/urweb.c index ef7eb9bb..09d04f1c 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4532,144 +4532,154 @@ void uw_set_remoteSock(uw_context ctx, int sock) { // Sqlcache -void uw_Sqlcache_listDelete(uw_Sqlcache_CacheList *list, uw_Sqlcache_CacheEntry *entry) { - if (list->first == entry) { - list->first = entry->next; - } - if (list->last == entry) { - list->last = entry->prev; - } - if (entry->prev) { - entry->prev->next = entry->next; - } - if (entry->next) { - entry->next->prev = entry->prev; +void uw_Sqlcache_freeValue(uw_Sqlcache_Value *value) { + if (value) { + free(value->result); + free(value->output); + free(value); } - entry->prev = NULL; - entry->next = NULL; - --(list->size); } -void uw_Sqlcache_listAdd(uw_Sqlcache_CacheList *list, uw_Sqlcache_CacheEntry *entry) { - if (list->last) { - list->last->next = entry; - entry->prev = list->last; - list->last = entry; - } else { - list->first = entry; - list->last = entry; +void uw_Sqlcache_freeEntry(uw_Sqlcache_Entry* entry) { + if (entry) { + free(entry->key); + uw_Sqlcache_freeValue(entry->value); + free(entry); } - ++(list->size); -} - -void uw_Sqlcache_listBump(uw_Sqlcache_CacheList *list, uw_Sqlcache_CacheEntry *entry) { - uw_Sqlcache_listDelete(list, entry); - uw_Sqlcache_listAdd(list, entry); } -// TODO: deal with time properly. +// TODO: pick a number. 
+unsigned int uw_Sqlcache_maxSize = 1234567890; -time_t uw_Sqlcache_getTimeNow() { - return time(NULL); +void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry) { + HASH_DEL(cache->table, entry); + uw_Sqlcache_freeEntry(entry); } -time_t uw_Sqlcache_timeMax(time_t x, time_t y) { - return difftime(x, y) > 0 ? x : y; +uw_Sqlcache_Entry *uw_Sqlcache_find(uw_Sqlcache_Cache *cache, char *key, size_t len, int bump) { + uw_Sqlcache_Entry *entry = NULL; + HASH_FIND(hh, cache->table, key, len, entry); + if (entry && bump) { + // Bump for LRU purposes. + HASH_DEL(cache->table, entry); + // Important that we use [entry->key], because [key] might be ephemeral. + HASH_ADD_KEYPTR(hh, cache->table, entry->key, len, entry); + } + return entry; } -void uw_Sqlcache_free(uw_Sqlcache_CacheValue *value) { - if (value) { - free(value->result); - free(value->output); - free(value); +void uw_Sqlcache_add(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry, size_t len) { + HASH_ADD_KEYPTR(hh, cache->table, entry->key, len, entry); + if (HASH_COUNT(cache->table) > uw_Sqlcache_maxSize) { + // Deletes the first element of the cache. + uw_Sqlcache_delete(cache, cache->table); } } -void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_CacheEntry* entry) { - //uw_Sqlcache_listUw_Sqlcache_Delete(cache->lru, entry); - HASH_DELETE(hh, cache->table, entry); - uw_Sqlcache_free(entry->value); - free(entry->key); - free(entry); -} - -uw_Sqlcache_CacheValue *uw_Sqlcache_checkHelper(uw_Sqlcache_Cache *cache, char **keys, int timeInvalid) { - char *key = keys[cache->height]; - uw_Sqlcache_CacheEntry *entry; - HASH_FIND(hh, cache->table, key, strlen(key), entry); - timeInvalid = uw_Sqlcache_timeMax(timeInvalid, cache->timeInvalid); - if (entry && difftime(entry->timeValid, timeInvalid) > 0) { - if (cache->height == 0) { - // At height 0, entry->value is the desired value. - //uw_Sqlcache_listBump(cache->lru, entry); - return entry->value; - } else { - // At height n+1, entry->value is a pointer to a cache at heignt n. - return uw_Sqlcache_checkHelper(entry->value, keys, timeInvalid); - } - } else { - return NULL; - } +unsigned long uw_Sqlcache_getTimeNow(uw_Sqlcache_Cache *cache) { + return ++cache->timeNow; } -uw_Sqlcache_CacheValue *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { - return uw_Sqlcache_checkHelper(cache, keys, 0); +unsigned long uw_Sqlcache_timeMax(unsigned long x, unsigned long y) { + return x > y ? x : y; } -void uw_Sqlcache_storeHelper(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_CacheValue *value, int timeNow) { - uw_Sqlcache_CacheEntry *entry; - char *key = keys[cache->height]; - HASH_FIND(hh, cache->table, key, strlen(key), entry); - if (!entry) { - entry = malloc(sizeof(uw_Sqlcache_CacheEntry)); - entry->key = strdup(key); - entry->value = NULL; - HASH_ADD_KEYPTR(hh, cache->table, entry->key, strlen(entry->key), entry); +char uw_Sqlcache_keySep = '_'; + +char *uw_Sqlcache_allocKeyBuffer(char **keys, int numKeys) { + size_t len = 0; + while (numKeys-- > 0) { + char* k = keys[numKeys]; + if (!k) { + // Can only happen when flushihg, in which case we don't need anything past the null key. + break; + } + // Leave room for separator. + len += 1 + strlen(k); } - entry->timeValid = timeNow; - if (cache->height == 0) { - //uw_Sqlcache_listAdd(cache->lru, entry); - uw_Sqlcache_free(entry->value); - entry->value = value; - //if (cache->lru->size > MAX_SIZE) { - //uw_Sqlcache_delete(cache, cache->lru->first); - // TODO: return flushed value. 
- //} - } else { - if (!entry->value) { - uw_Sqlcache_Cache *newuw_Sqlcache_Cache = malloc(sizeof(uw_Sqlcache_Cache)); - newuw_Sqlcache_Cache->table = NULL; - newuw_Sqlcache_Cache->timeInvalid = timeNow; - newuw_Sqlcache_Cache->lru = cache->lru; - newuw_Sqlcache_Cache->height = cache->height - 1; - entry->value = newuw_Sqlcache_Cache; + char *buf = malloc(len+1); + // If nothing is copied into the buffer, it should look like it has length 0. + buf[0] = 0; + return buf; +} + +char *uw_Sqlcache_keyCopy(char *buf, char *key) { + *buf++ = uw_Sqlcache_keySep; + return stpcpy(buf, key); +} + +// The NUL-terminated prefix of [key] below always looks something like "_k1_k2_k3..._kn". +// TODO: strlen(key) = buf - key? + +uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys, int numKeys) { + char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); + char *buf = key; + time_t timeInvalid = cache->timeInvalid; + uw_Sqlcache_Entry *entry; + while (numKeys-- > 0) { + buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); + size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 1); + if (!entry) { + free(key); + return NULL; } - uw_Sqlcache_storeHelper(entry->value, keys, value, timeNow); + timeInvalid = uw_Sqlcache_timeMax(timeInvalid, entry->timeInvalid); } -} - -void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_CacheValue *value) { - uw_Sqlcache_storeHelper(cache, keys, value, uw_Sqlcache_getTimeNow()); -} - -void uw_Sqlcache_flushHelper(uw_Sqlcache_Cache *cache, char **keys, int timeNow) { - uw_Sqlcache_CacheEntry *entry; - char *key = keys[cache->height]; - if (key) { - HASH_FIND(hh, cache->table, key, strlen(key), entry); - if (entry) { - if (cache->height == 0) { - uw_Sqlcache_delete(cache, entry); + free(key); + uw_Sqlcache_Value *value = entry->value; + return value && value->timeValid > timeInvalid ? value : NULL; +} + +void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, int numKeys, uw_Sqlcache_Value *value) { + char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); + char *buf = key; + time_t timeNow = uw_Sqlcache_getTimeNow(cache); + uw_Sqlcache_Entry *entry; + while (numKeys-- > 0) { + buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); + size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 1); + if (!entry) { + entry = malloc(sizeof(uw_Sqlcache_Entry)); + entry->key = strdup(key); + entry->value = NULL; + entry->timeInvalid = 0; // ASK: is this okay? + uw_Sqlcache_add(cache, entry, len); + } + } + free(key); + uw_Sqlcache_freeValue(entry->value); + entry->value = value; + entry->value->timeValid = timeNow; +} + +void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys, int numKeys) { + char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); + char *buf = key; + time_t timeNow = uw_Sqlcache_getTimeNow(cache); + uw_Sqlcache_Entry *entry; + while (numKeys-- > 0) { + char *k = keys[numKeys]; + if (!k) { + if (entry) { + entry->timeInvalid = timeNow; } else { - uw_Sqlcache_flushHelper(entry->value, keys, timeNow); + // Haven't found an entry yet, so the first key was null. + cache->timeInvalid = timeNow; } + free(key); + return; + } + buf = uw_Sqlcache_keyCopy(buf, k); + size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 0); + if (!entry) { + free(key); + return; } - } else { - // Null key means invalidate the entire subtree. 
- cache->timeInvalid = timeNow; } -} - -void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys) { - uw_Sqlcache_flushHelper(cache, keys, uw_Sqlcache_getTimeNow()); + free(key); + // All the keys were non-null and the relevant entry is present, so we delete it. + uw_Sqlcache_delete(cache, entry); } diff --git a/src/lru_cache.sml b/src/lru_cache.sml index e69624d8..6fcfdc55 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -62,6 +62,8 @@ fun setupQuery {index, params} = val revArgs = paramRepeatRev (fn p => "p" ^ p) ", " + val numArgs = Int.toString params + in Print.box [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), @@ -70,9 +72,7 @@ fun setupQuery {index, params} = newline, string " .timeInvalid = 0,", newline, - string " .lru = NULL,", - newline, - string (" .height = " ^ Int.toString (params - 1) ^ "};"), + string " .timeNow = 0};", newline, string ("static uw_Sqlcache_Cache *cache" ^ i ^ " = &cacheStruct" ^ i ^ ";"), newline, @@ -83,7 +83,8 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_CacheValue *v = uw_Sqlcache_check(cache" ^ i ^ ", ks);"), + string " uw_Sqlcache_Value *v = ", + string ("uw_Sqlcache_check(cache" ^ i ^ ", ks, " ^ numArgs ^ ");"), newline, (* If the output is null, it means we had too much recursion, so it's a miss. *) string " if (v && v->output != NULL) {", @@ -113,7 +114,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_CacheValue *v = malloc(sizeof(uw_Sqlcache_CacheValue));"), + string (" uw_Sqlcache_Value *v = malloc(sizeof(uw_Sqlcache_Value));"), newline, string " v->result = strdup(s);", newline, @@ -121,7 +122,7 @@ fun setupQuery {index, params} = newline, string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), newline, - string (" uw_Sqlcache_store(cache" ^ i ^ ", ks, v);"), + string (" uw_Sqlcache_store(cache" ^ i ^ ", ks, " ^ numArgs ^ ", v);"), newline, string " return uw_unit_v;", newline, @@ -134,7 +135,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_flush(cache" ^ i ^ ", ks);"), + string (" uw_Sqlcache_flush(cache" ^ i ^ ", ks, " ^ numArgs ^ ");"), newline, string " return uw_unit_v;", newline, -- cgit v1.2.3 From 011b7148c87f8b0d90abee2f454ef7689493e1f9 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 12 Nov 2015 09:15:50 -0500 Subject: Simplify C interface. --- include/urweb/types_cpp.h | 1 + include/urweb/urweb_cpp.h | 6 +++--- src/c/urweb.c | 11 +++++++---- src/lru_cache.sml | 11 +++++------ 4 files changed, 16 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 4847a3fd..3955dcc8 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -140,6 +140,7 @@ typedef struct uw_Sqlcache_Cache { struct uw_Sqlcache_Entry *table; unsigned long timeInvalid; unsigned long timeNow; + size_t numKeys; UT_hash_handle hh; } uw_Sqlcache_Cache; diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index f89c432c..15bfffac 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -406,8 +406,8 @@ void uw_Basis_writec(struct uw_context *, char); // Sqlcache. 
-uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **, int); -void *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, int, uw_Sqlcache_Value *); -void *uw_Sqlcache_flush(uw_Sqlcache_Cache *, char **, int); +uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **); +void *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, uw_Sqlcache_Value *); +void *uw_Sqlcache_flush(uw_Sqlcache_Cache *, char **); #endif diff --git a/src/c/urweb.c b/src/c/urweb.c index 30619314..71130cc7 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -488,7 +488,7 @@ struct uw_context { char *output_buffer; size_t output_buffer_size; - // For caching. + // Sqlcache. int numRecording; int recordingOffset; @@ -4616,7 +4616,8 @@ char *uw_Sqlcache_keyCopy(char *buf, char *key) { // The NUL-terminated prefix of [key] below always looks something like "_k1_k2_k3..._kn". // TODO: strlen(key) = buf - key? -uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys, int numKeys) { +uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { + size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; time_t timeInvalid = cache->timeInvalid; @@ -4636,7 +4637,8 @@ uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys, int return value && value->timeValid > timeInvalid ? value : NULL; } -void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, int numKeys, uw_Sqlcache_Value *value) { +void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { + size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; time_t timeNow = uw_Sqlcache_getTimeNow(cache); @@ -4659,7 +4661,8 @@ void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, int numKeys, uw_Sq entry->value->timeValid = timeNow; } -void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys, int numKeys) { +void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys) { + size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; time_t timeNow = uw_Sqlcache_getTimeNow(cache); diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 6fcfdc55..d4da2849 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -62,14 +62,14 @@ fun setupQuery {index, params} = val revArgs = paramRepeatRev (fn p => "p" ^ p) ", " - val numArgs = Int.toString params - in Print.box [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), newline, string " .table = NULL,", newline, + string (" .numKeys = " ^ Int.toString params ^ ","), + newline, string " .timeInvalid = 0,", newline, string " .timeNow = 0};", @@ -83,8 +83,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string " uw_Sqlcache_Value *v = ", - string ("uw_Sqlcache_check(cache" ^ i ^ ", ks, " ^ numArgs ^ ");"), + string (" uw_Sqlcache_Value *v = uw_Sqlcache_check(cache" ^ i ^ ", ks);"), newline, (* If the output is null, it means we had too much recursion, so it's a miss. 
*) string " if (v && v->output != NULL) {", @@ -122,7 +121,7 @@ fun setupQuery {index, params} = newline, string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), newline, - string (" uw_Sqlcache_store(cache" ^ i ^ ", ks, " ^ numArgs ^ ", v);"), + string (" uw_Sqlcache_store(cache" ^ i ^ ", ks, v);"), newline, string " return uw_unit_v;", newline, @@ -135,7 +134,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_flush(cache" ^ i ^ ", ks, " ^ numArgs ^ ");"), + string (" uw_Sqlcache_flush(cache" ^ i ^ ", ks);"), newline, string " return uw_unit_v;", newline, -- cgit v1.2.3 From 7fff147bd1fad81381fb36396021c3acb33da44d Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 12 Nov 2015 09:47:20 -0500 Subject: Make cache flushes safe for transactions (not sure about LRU bump on read). --- src/c/urweb.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 71130cc7..050a06c9 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -424,6 +424,12 @@ typedef struct { void (*free)(void*); } global; +typedef struct uw_Sqlcache_Inval { + uw_Sqlcache_Cache *cache; + char **keys; + struct uw_Sqlcache_Inval *next; +} uw_Sqlcache_Inval; + struct uw_context { uw_app *app; int id; @@ -491,6 +497,7 @@ struct uw_context { // Sqlcache. int numRecording; int recordingOffset; + uw_Sqlcache_Inval *inval; int remoteSock; }; @@ -4661,7 +4668,7 @@ void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value entry->value->timeValid = timeNow; } -void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys) { +void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4691,3 +4698,43 @@ void uw_Sqlcache_flush(uw_Sqlcache_Cache *cache, char **keys) { // All the keys were non-null and the relevant entry is present, so we delete it. uw_Sqlcache_delete(cache, entry); } + +void uw_Sqlcache_flushFree(void *data, int dontCare) { + uw_Sqlcache_Inval *inval = (uw_Sqlcache_Inval *)data; + while (inval) { + char** keys = inval->keys; + size_t numKeys = inval->cache->numKeys; + while (numKeys-- > 0) { + free(keys[numKeys]); + } + free(keys); + uw_Sqlcache_Inval *nextInval = inval->next; + free(inval); + inval = nextInval; + } +} + +void uw_Sqlcache_flushCommit(void *data) { + uw_Sqlcache_Inval *inval = (uw_Sqlcache_Inval *)data; + uw_Sqlcache_Inval *invalFirst = inval; + while (inval) { + uw_Sqlcache_Cache *cache = inval->cache; + char **keys = inval->keys; + uw_Sqlcache_flushCommitOne(cache, keys); + inval = inval->next; + } + uw_Sqlcache_flushFree(invalFirst, 0); +} + +void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { + uw_Sqlcache_Inval *inval = malloc(sizeof(uw_Sqlcache_Inval)); + inval->cache = cache; + inval->keys = keys; + inval->next = NULL; + if (ctx->inval) { + ctx->inval->next = inval; + } else { + uw_register_transactional(ctx, inval, uw_Sqlcache_flushCommit, NULL, uw_Sqlcache_flushFree); + } + ctx->inval = inval; +} -- cgit v1.2.3 From ed20a67a1268bf517cfdbc1a897b659dce38f3a4 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 12 Nov 2015 10:06:07 -0500 Subject: Initialize invalidation to NULL! 
--- src/c/urweb.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 050a06c9..55d89fd5 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -585,6 +585,7 @@ uw_context uw_init(int id, uw_loggers *lg) { ctx->numRecording = 0; ctx->recordingOffset = 0; + ctx->inval = NULL; ctx->remoteSock = -1; @@ -4732,9 +4733,11 @@ void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { inval->keys = keys; inval->next = NULL; if (ctx->inval) { + // An invalidation is already registered, so just extend it. ctx->inval->next = inval; } else { uw_register_transactional(ctx, inval, uw_Sqlcache_flushCommit, NULL, uw_Sqlcache_flushFree); } + // [ctx->inval] should always point to the last invalidation. ctx->inval = inval; } -- cgit v1.2.3 From fd7375f584790047731686345c8ce6fedee71435 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 12 Nov 2015 11:44:21 -0500 Subject: Actually use transactional machinery for flushes this time. --- include/urweb/types_cpp.h | 10 +++------- include/urweb/urweb_cpp.h | 2 +- src/c/urweb.c | 28 ++++++++++++++++++++++++---- src/lru_cache.sml | 4 +++- 4 files changed, 31 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 3955dcc8..c4af2866 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -129,15 +129,11 @@ typedef struct uw_Sqlcache_Value { unsigned long timeValid; } uw_Sqlcache_Value; -typedef struct uw_Sqlcache_Entry { - char *key; - uw_Sqlcache_Value *value; - unsigned long timeInvalid; - UT_hash_handle hh; -} uw_Sqlcache_Entry; +typedef struct uw_Sqlcache_Entry uw_Sqlcache_Entry; typedef struct uw_Sqlcache_Cache { - struct uw_Sqlcache_Entry *table; + //pthread_rwlock_t *lock; + uw_Sqlcache_Entry *table; unsigned long timeInvalid; unsigned long timeNow; size_t numKeys; diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index 15bfffac..3e70b4ac 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -408,6 +408,6 @@ void uw_Basis_writec(struct uw_context *, char); uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **); void *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, uw_Sqlcache_Value *); -void *uw_Sqlcache_flush(uw_Sqlcache_Cache *, char **); +void *uw_Sqlcache_flush(struct uw_context *, uw_Sqlcache_Cache *, char **); #endif diff --git a/src/c/urweb.c b/src/c/urweb.c index 55d89fd5..4db019fe 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4545,6 +4545,13 @@ void uw_set_remoteSock(uw_context ctx, int sock) { // Sqlcache +typedef struct uw_Sqlcache_Entry { + char *key; + uw_Sqlcache_Value *value; + unsigned long timeInvalid; + UT_hash_handle hh; +} uw_Sqlcache_Entry; + void uw_Sqlcache_freeValue(uw_Sqlcache_Value *value) { if (value) { free(value->result); @@ -4599,7 +4606,7 @@ unsigned long uw_Sqlcache_timeMax(unsigned long x, unsigned long y) { char uw_Sqlcache_keySep = '_'; -char *uw_Sqlcache_allocKeyBuffer(char **keys, int numKeys) { +char *uw_Sqlcache_allocKeyBuffer(char **keys, size_t numKeys) { size_t len = 0; while (numKeys-- > 0) { char* k = keys[numKeys]; @@ -4625,6 +4632,7 @@ char *uw_Sqlcache_keyCopy(char *buf, char *key) { // TODO: strlen(key) = buf - key? 
uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { + //pthread_rwlock_rdlock(cache->lock); size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4642,10 +4650,12 @@ uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { } free(key); uw_Sqlcache_Value *value = entry->value; + //pthread_rwlock_unlock(cache->lock); return value && value->timeValid > timeInvalid ? value : NULL; } void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { + //pthread_rwlock_wrlock(cache->lock); size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4667,6 +4677,7 @@ void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value uw_Sqlcache_freeValue(entry->value); entry->value = value; entry->value->timeValid = timeNow; + //pthread_rwlock_unlock(cache->lock); } void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { @@ -4717,20 +4728,28 @@ void uw_Sqlcache_flushFree(void *data, int dontCare) { void uw_Sqlcache_flushCommit(void *data) { uw_Sqlcache_Inval *inval = (uw_Sqlcache_Inval *)data; - uw_Sqlcache_Inval *invalFirst = inval; while (inval) { uw_Sqlcache_Cache *cache = inval->cache; char **keys = inval->keys; uw_Sqlcache_flushCommitOne(cache, keys); inval = inval->next; } - uw_Sqlcache_flushFree(invalFirst, 0); +} + +char **uw_Sqlcache_copyKeys(char **keys, size_t numKeys) { + char **copy = malloc(sizeof(char *) * numKeys); + while (numKeys-- > 0) { + char * k = keys[numKeys]; + copy[numKeys] = k ? strdup(k) : NULL; + } + return copy; } void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { + //pthread_rwlock_wrlock(cache->lock); uw_Sqlcache_Inval *inval = malloc(sizeof(uw_Sqlcache_Inval)); inval->cache = cache; - inval->keys = keys; + inval->keys = uw_Sqlcache_copyKeys(keys, cache->numKeys); inval->next = NULL; if (ctx->inval) { // An invalidation is already registered, so just extend it. @@ -4740,4 +4759,5 @@ void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { } // [ctx->inval] should always point to the last invalidation. ctx->inval = inval; + //pthread_rwlock_unlock(cache->lock); } diff --git a/src/lru_cache.sml b/src/lru_cache.sml index d4da2849..9d65420b 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -65,6 +65,8 @@ fun setupQuery {index, params} = in Print.box [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), + (* newline, *) + (* string " .lock = PTHREAD_RWLOCK_INITIALIZER,", *) newline, string " .table = NULL,", newline, @@ -134,7 +136,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_flush(cache" ^ i ^ ", ks);"), + string (" uw_Sqlcache_flush(ctx, cache" ^ i ^ ", ks);"), newline, string " return uw_unit_v;", newline, -- cgit v1.2.3 From 06464bd07cb1efbc9df4ca650978c14f4c20390a Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 12 Nov 2015 16:36:35 -0500 Subject: Fix committing multiple stores/flushes. Locking is WIP. 
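This commit queues each store and flush on a per-transaction list (ctx->cacheUpdate / ctx->cacheUpdateTail) and only applies the queued updates when the transaction commits, through uw_register_transactional. The following is a rough standalone sketch of that pattern; the names Update, Txn, enqueue, and commit are simplified stand-ins for illustration, not the Ur/Web runtime API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for uw_Sqlcache_Update: a pending store (value != NULL)
   or flush (value == NULL) that is applied only when the transaction commits. */
typedef struct Update {
  char *key;
  char *value;               /* NULL means "flush this key" */
  struct Update *next;
} Update;

typedef struct {
  Update *head;              /* like ctx->cacheUpdate */
  Update *tail;              /* like ctx->cacheUpdateTail */
} Txn;

static void enqueue(Txn *txn, const char *key, const char *value) {
  Update *u = malloc(sizeof(Update));
  u->key = strdup(key);
  u->value = value ? strdup(value) : NULL;
  u->next = NULL;
  if (txn->tail) txn->tail->next = u; else txn->head = u;
  txn->tail = u;
}

/* At commit, replay the queue in order, so later updates win over earlier ones. */
static void commit(Txn *txn) {
  Update *u = txn->head;
  while (u) {
    if (u->value) printf("store %s = %s\n", u->key, u->value);
    else          printf("flush %s\n", u->key);
    Update *next = u->next;
    free(u->key);
    free(u->value);
    free(u);
    u = next;
  }
  txn->head = txn->tail = NULL;
}

int main(void) {
  Txn txn = {NULL, NULL};
  enqueue(&txn, "_42", "cached page for key 42");  /* a store */
  enqueue(&txn, "_42", NULL);                      /* a later flush of the same key */
  commit(&txn);   /* the shared cache is untouched before this point */
  return 0;
}

Representing a flush as a store of NULL, as the patch below does, keeps commit-time replay to a single ordered walk of one list.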
--- include/urweb/types_cpp.h | 3 +- include/urweb/urweb_cpp.h | 4 +- src/c/urweb.c | 108 +++++++++++++++++++++++++++------------------- src/lru_cache.sml | 8 ++-- 4 files changed, 72 insertions(+), 51 deletions(-) (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index c4af2866..82f8d30a 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -121,6 +121,7 @@ typedef struct { // Caching +#include #include "uthash.h" typedef struct uw_Sqlcache_Value { @@ -132,7 +133,7 @@ typedef struct uw_Sqlcache_Value { typedef struct uw_Sqlcache_Entry uw_Sqlcache_Entry; typedef struct uw_Sqlcache_Cache { - //pthread_rwlock_t *lock; + pthread_rwlock_t lock; uw_Sqlcache_Entry *table; unsigned long timeInvalid; unsigned long timeNow; diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index 3e70b4ac..2c032e7b 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -406,8 +406,8 @@ void uw_Basis_writec(struct uw_context *, char); // Sqlcache. -uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *, char **); -void *uw_Sqlcache_store(uw_Sqlcache_Cache *, char **, uw_Sqlcache_Value *); +uw_Sqlcache_Value *uw_Sqlcache_check(struct uw_context *, uw_Sqlcache_Cache *, char **); +void *uw_Sqlcache_store(struct uw_context *, uw_Sqlcache_Cache *, char **, uw_Sqlcache_Value *); void *uw_Sqlcache_flush(struct uw_context *, uw_Sqlcache_Cache *, char **); #endif diff --git a/src/c/urweb.c b/src/c/urweb.c index 4db019fe..4afc7297 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -424,11 +424,12 @@ typedef struct { void (*free)(void*); } global; -typedef struct uw_Sqlcache_Inval { +typedef struct uw_Sqlcache_Update { uw_Sqlcache_Cache *cache; char **keys; - struct uw_Sqlcache_Inval *next; -} uw_Sqlcache_Inval; + uw_Sqlcache_Value *value; + struct uw_Sqlcache_Update *next; +} uw_Sqlcache_Update; struct uw_context { uw_app *app; @@ -497,7 +498,8 @@ struct uw_context { // Sqlcache. int numRecording; int recordingOffset; - uw_Sqlcache_Inval *inval; + uw_Sqlcache_Update *cacheUpdate; + uw_Sqlcache_Update *cacheUpdateTail; int remoteSock; }; @@ -508,6 +510,7 @@ size_t uw_heap_max = SIZE_MAX; size_t uw_script_max = SIZE_MAX; uw_context uw_init(int id, uw_loggers *lg) { + puts("Initializing"); uw_context ctx = malloc(sizeof(struct uw_context)); ctx->app = NULL; @@ -585,7 +588,8 @@ uw_context uw_init(int id, uw_loggers *lg) { ctx->numRecording = 0; ctx->recordingOffset = 0; - ctx->inval = NULL; + ctx->cacheUpdate = NULL; + ctx->cacheUpdateTail = NULL; ctx->remoteSock = -1; @@ -4629,10 +4633,9 @@ char *uw_Sqlcache_keyCopy(char *buf, char *key) { } // The NUL-terminated prefix of [key] below always looks something like "_k1_k2_k3..._kn". -// TODO: strlen(key) = buf - key? -uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { - //pthread_rwlock_rdlock(cache->lock); +uw_Sqlcache_Value *uw_Sqlcache_check(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { + pthread_rwlock_rdlock(&cache->lock); size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4644,18 +4647,20 @@ uw_Sqlcache_Value *uw_Sqlcache_check(uw_Sqlcache_Cache *cache, char **keys) { entry = uw_Sqlcache_find(cache, key, len, 1); if (!entry) { free(key); + pthread_rwlock_unlock(&cache->lock); return NULL; } timeInvalid = uw_Sqlcache_timeMax(timeInvalid, entry->timeInvalid); } free(key); + // TODO: pass back copy of value and free it in the generated code... or use uw_malloc? 
uw_Sqlcache_Value *value = entry->value; - //pthread_rwlock_unlock(cache->lock); + pthread_rwlock_unlock(&cache->lock); return value && value->timeValid > timeInvalid ? value : NULL; } -void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { - //pthread_rwlock_wrlock(cache->lock); +void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { + pthread_rwlock_wrlock(&cache->lock); size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4669,7 +4674,7 @@ void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value entry = malloc(sizeof(uw_Sqlcache_Entry)); entry->key = strdup(key); entry->value = NULL; - entry->timeInvalid = 0; // ASK: is this okay? + entry->timeInvalid = 0; uw_Sqlcache_add(cache, entry, len); } } @@ -4677,10 +4682,11 @@ void uw_Sqlcache_store(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value uw_Sqlcache_freeValue(entry->value); entry->value = value; entry->value->timeValid = timeNow; - //pthread_rwlock_unlock(cache->lock); + pthread_rwlock_unlock(&cache->lock); } void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { + pthread_rwlock_wrlock(&cache->lock); size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4709,55 +4715,69 @@ void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { free(key); // All the keys were non-null and the relevant entry is present, so we delete it. uw_Sqlcache_delete(cache, entry); + pthread_rwlock_unlock(&cache->lock); } -void uw_Sqlcache_flushFree(void *data, int dontCare) { - uw_Sqlcache_Inval *inval = (uw_Sqlcache_Inval *)data; - while (inval) { - char** keys = inval->keys; - size_t numKeys = inval->cache->numKeys; +void uw_Sqlcache_freeUpdate(void *data, int dontCare) { + uw_context ctx = (uw_context)data; + uw_Sqlcache_Update *update = ctx->cacheUpdate; + while (update) { + char** keys = update->keys; + size_t numKeys = update->cache->numKeys; while (numKeys-- > 0) { free(keys[numKeys]); } free(keys); - uw_Sqlcache_Inval *nextInval = inval->next; - free(inval); - inval = nextInval; + // Don't free [update->value]: it's in the cache now! + uw_Sqlcache_Update *nextUpdate = update->next; + free(update); + update = nextUpdate; } -} - -void uw_Sqlcache_flushCommit(void *data) { - uw_Sqlcache_Inval *inval = (uw_Sqlcache_Inval *)data; - while (inval) { - uw_Sqlcache_Cache *cache = inval->cache; - char **keys = inval->keys; - uw_Sqlcache_flushCommitOne(cache, keys); - inval = inval->next; + ctx->cacheUpdate = NULL; + ctx->cacheUpdateTail = NULL; +} + +void uw_Sqlcache_commitUpdate(void *data) { + uw_context ctx = (uw_context)data; + uw_Sqlcache_Update *update = ctx->cacheUpdate; + while (update) { + uw_Sqlcache_Cache *cache = update->cache; + char **keys = update->keys; + if (update->value) { + uw_Sqlcache_storeCommitOne(cache, keys, update->value); + } else { + uw_Sqlcache_flushCommitOne(cache, keys); + } + update = update->next; } } char **uw_Sqlcache_copyKeys(char **keys, size_t numKeys) { char **copy = malloc(sizeof(char *) * numKeys); while (numKeys-- > 0) { - char * k = keys[numKeys]; + char *k = keys[numKeys]; copy[numKeys] = k ? 
strdup(k) : NULL; } return copy; } -void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { - //pthread_rwlock_wrlock(cache->lock); - uw_Sqlcache_Inval *inval = malloc(sizeof(uw_Sqlcache_Inval)); - inval->cache = cache; - inval->keys = uw_Sqlcache_copyKeys(keys, cache->numKeys); - inval->next = NULL; - if (ctx->inval) { - // An invalidation is already registered, so just extend it. - ctx->inval->next = inval; +void uw_Sqlcache_store(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { + uw_Sqlcache_Update *update = malloc(sizeof(uw_Sqlcache_Update)); + update->cache = cache; + update->keys = uw_Sqlcache_copyKeys(keys, cache->numKeys); + update->value = value; + update->next = NULL; + if (ctx->cacheUpdateTail) { + // An update is already registered, so just extend it. + ctx->cacheUpdateTail->next = update; } else { - uw_register_transactional(ctx, inval, uw_Sqlcache_flushCommit, NULL, uw_Sqlcache_flushFree); + ctx->cacheUpdate = update; + uw_register_transactional(ctx, ctx, uw_Sqlcache_commitUpdate, NULL, uw_Sqlcache_freeUpdate); } - // [ctx->inval] should always point to the last invalidation. - ctx->inval = inval; - //pthread_rwlock_unlock(cache->lock); + ctx->cacheUpdateTail = update; +} + +void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { + // A flush is represented in the queue as storing NULL. + uw_Sqlcache_store(ctx, cache, keys, NULL); } diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 9d65420b..b6ffe700 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -65,8 +65,8 @@ fun setupQuery {index, params} = in Print.box [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), - (* newline, *) - (* string " .lock = PTHREAD_RWLOCK_INITIALIZER,", *) + newline, + string " .lock = PTHREAD_RWLOCK_INITIALIZER,", newline, string " .table = NULL,", newline, @@ -85,7 +85,7 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_Value *v = uw_Sqlcache_check(cache" ^ i ^ ", ks);"), + string (" uw_Sqlcache_Value *v = uw_Sqlcache_check(ctx, cache" ^ i ^ ", ks);"), newline, (* If the output is null, it means we had too much recursion, so it's a miss. *) string " if (v && v->output != NULL) {", @@ -123,7 +123,7 @@ fun setupQuery {index, params} = newline, string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), newline, - string (" uw_Sqlcache_store(cache" ^ i ^ ", ks, v);"), + string (" uw_Sqlcache_store(ctx, cache" ^ i ^ ", ks, v);"), newline, string " return uw_unit_v;", newline, -- cgit v1.2.3 From c38edb9bd5c21bcc1d21979d40ec8e9d638b6e9c Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Fri, 13 Nov 2015 01:04:32 -0500 Subject: Fix issue with one-element caches. Locking still WIP. 
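The fix below special-cases caches for queries with no parameters: with zero keys there is no "_k1_k2..._kn" buffer to hash on, so the cache degenerates to a single entry held directly in cache->table. For reference, here is a small self-contained sketch of the composite-key encoding that the multi-key path relies on; it is simplified from uw_Sqlcache_allocKeyBuffer and uw_Sqlcache_keyCopy (key order and error handling are ignored) and is not the runtime code itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The full key for parameters k1..kn is "_k1_k2..._kn", and each
 * NUL-terminated prefix ("_k1", "_k1_k2", ...) names one level of the
 * lookup.  With zero keys the buffer stays empty, which is why the patch
 * treats that case as a single-slot cache. */
static const char keySep = '_';

static char *allocKeyBuffer(char **keys, size_t numKeys) {
  size_t len = 0;
  for (size_t i = 0; i < numKeys; ++i)
    len += 1 + strlen(keys[i]);   /* one separator per key */
  char *buf = malloc(len + 1);
  buf[0] = '\0';                  /* empty until keys are copied in */
  return buf;
}

static char *keyCopy(char *buf, const char *key) {
  *buf++ = keySep;
  strcpy(buf, key);
  return buf + strlen(key);       /* points at the NUL, like stpcpy */
}

int main(void) {
  char *keys[] = {"42", "alice"};
  char *key = allocKeyBuffer(keys, 2);
  char *buf = key;
  for (size_t i = 0; i < 2; ++i) {
    buf = keyCopy(buf, keys[i]);
    printf("prefix %zu: \"%s\" (length %zu)\n", i + 1, key, (size_t)(buf - key));
  }
  free(key);
  return 0;
}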
--- src/c/urweb.c | 95 ++++++++++++++++++++++------------ src/cache.sml | 9 ++-- src/lru_cache.sml | 29 ++++++----- src/sqlcache.sml | 149 ++++++++++++++++++++++++++++++++++++------------------ src/toy_cache.sml | 5 +- 5 files changed, 189 insertions(+), 98 deletions(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 4afc7297..02e17a0b 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4641,18 +4641,27 @@ uw_Sqlcache_Value *uw_Sqlcache_check(uw_context ctx, uw_Sqlcache_Cache *cache, c char *buf = key; time_t timeInvalid = cache->timeInvalid; uw_Sqlcache_Entry *entry; - while (numKeys-- > 0) { - buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); - size_t len = buf - key; - entry = uw_Sqlcache_find(cache, key, len, 1); + if (numKeys == 0) { + entry = cache->table; if (!entry) { free(key); pthread_rwlock_unlock(&cache->lock); return NULL; } - timeInvalid = uw_Sqlcache_timeMax(timeInvalid, entry->timeInvalid); + } else { + while (numKeys-- > 0) { + buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); + size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 1); + if (!entry) { + free(key); + pthread_rwlock_unlock(&cache->lock); + return NULL; + } + timeInvalid = uw_Sqlcache_timeMax(timeInvalid, entry->timeInvalid); + } + free(key); } - free(key); // TODO: pass back copy of value and free it in the generated code... or use uw_malloc? uw_Sqlcache_Value *value = entry->value; pthread_rwlock_unlock(&cache->lock); @@ -4666,19 +4675,30 @@ void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcac char *buf = key; time_t timeNow = uw_Sqlcache_getTimeNow(cache); uw_Sqlcache_Entry *entry; - while (numKeys-- > 0) { - buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); - size_t len = buf - key; - entry = uw_Sqlcache_find(cache, key, len, 1); + if (numKeys == 0) { + entry = cache->table; if (!entry) { entry = malloc(sizeof(uw_Sqlcache_Entry)); entry->key = strdup(key); entry->value = NULL; entry->timeInvalid = 0; - uw_Sqlcache_add(cache, entry, len); + cache->table = entry; } + } else { + while (numKeys-- > 0) { + buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); + size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 1); + if (!entry) { + entry = malloc(sizeof(uw_Sqlcache_Entry)); + entry->key = strdup(key); + entry->value = NULL; + entry->timeInvalid = 0; + uw_Sqlcache_add(cache, entry, len); + } + } + free(key); } - free(key); uw_Sqlcache_freeValue(entry->value); entry->value = value; entry->value->timeValid = timeNow; @@ -4692,29 +4712,40 @@ void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { char *buf = key; time_t timeNow = uw_Sqlcache_getTimeNow(cache); uw_Sqlcache_Entry *entry; - while (numKeys-- > 0) { - char *k = keys[numKeys]; - if (!k) { - if (entry) { - entry->timeInvalid = timeNow; - } else { - // Haven't found an entry yet, so the first key was null. - cache->timeInvalid = timeNow; - } - free(key); - return; + if (numKeys == 0) { + puts("flush cache of height 0"); + entry = cache->table; + if (entry) { + uw_Sqlcache_freeValue(entry->value); + entry->value = NULL; } - buf = uw_Sqlcache_keyCopy(buf, k); - size_t len = buf - key; - entry = uw_Sqlcache_find(cache, key, len, 0); - if (!entry) { - free(key); - return; + } else { + while (numKeys-- > 0) { + char *k = keys[numKeys]; + if (!k) { + if (entry) { + entry->timeInvalid = timeNow; + } else { + // Haven't found an entry yet, so the first key was null. 
+ cache->timeInvalid = timeNow; + } + free(key); + pthread_rwlock_unlock(&cache->lock); + return; + } + buf = uw_Sqlcache_keyCopy(buf, k); + size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 0); + if (!entry) { + free(key); + pthread_rwlock_unlock(&cache->lock); + return; + } } + free(key); + // All the keys were non-null and the relevant entry is present, so we delete it. + uw_Sqlcache_delete(cache, entry); } - free(key); - // All the keys were non-null and the relevant entry is present, so we delete it. - uw_Sqlcache_delete(cache, entry); pthread_rwlock_unlock(&cache->lock); } diff --git a/src/cache.sml b/src/cache.sml index 8de22e0d..015c3ff1 100644 --- a/src/cache.sml +++ b/src/cache.sml @@ -2,13 +2,14 @@ structure Cache = struct type cache = {(* Takes a query ID and parameters (and, for store, the value to - store) and gives an FFI call that checks, stores, or flushes the - relevant entry. The parameters are strings for check and store and - optional strings for flush because some parameters might not be - fixed. *) + store) and gives an FFI call that checks, stores, or flushes the + relevant entry. The parameters are strings for check and store and + optional strings for flush because some parameters might not be + fixed. *) check : int * Mono.exp list -> Mono.exp', store : int * Mono.exp list * Mono.exp -> Mono.exp', flush : int * Mono.exp list -> Mono.exp', + lock : int * bool (* true = write, false = read *) -> Mono.exp', (* Generates C needed for FFI calls in check, store, and flush. *) setupGlobal : Print.PD.pp_desc, setupQuery : {index : int, params : int} -> Print.PD.pp_desc} diff --git a/src/lru_cache.sml b/src/lru_cache.sml index b6ffe700..b66becb7 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -24,6 +24,9 @@ fun store (index, keys, value) = fun flush (index, keys) = ffiAppCache' ("flush", index, withTyp optionStringTyp keys) +fun lock (index, write) = + ffiAppCache' ((if write then "w" else "r") ^ "lock", index, []) + (* Cjr *) @@ -157,18 +160,18 @@ fun toyIfNoKeys numKeys implLru implToy args = else implLru args val cache = - let - val {check = toyCheck, - store = toyStore, - flush = toyFlush, - setupQuery = toySetupQuery, - ...} = ToyCache.cache - in - {check = toyIfNoKeys (length o #2) check toyCheck, - store = toyIfNoKeys (length o #2) store toyStore, - flush = toyIfNoKeys (length o #2) flush toyFlush, - setupQuery = toyIfNoKeys #params setupQuery toySetupQuery, - setupGlobal = setupGlobal} - end + (* let *) + (* val {check = toyCheck, *) + (* store = toyStore, *) + (* flush = toyFlush, *) + (* setupQuery = toySetupQuery, *) + (* ...} = ToyCache.cache *) + (* in *) + (* {check = toyIfNoKeys (length o #2) check toyCheck, *) + (* store = toyIfNoKeys (length o #2) store toyStore, *) + (* flush = toyIfNoKeys (length o #2) flush toyFlush, *) + {check = check, store = store, flush = flush, lock = lock, + setupQuery = setupQuery, setupGlobal = setupGlobal} + (* end *) end diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 5a748496..2b3b80ae 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,6 +1,9 @@ structure Sqlcache :> SQLCACHE = struct -open Mono + +(*********************) +(* General Utilities *) +(*********************) structure IK = struct type ord_key = int val compare = Int.compare end structure IS = IntBinarySet @@ -8,10 +11,9 @@ structure IM = IntBinaryMap structure SK = struct type ord_key = string val compare = String.compare end structure SS = BinarySetFn(SK) structure SM = BinaryMapFn(SK) +structure IIMM = 
MultimapFn(structure KeyMap = IM structure ValSet = IS) structure SIMM = MultimapFn(structure KeyMap = SM structure ValSet = IS) -(* ASK: how do we deal with heap reallocation? *) - fun id x = x fun iterate f n x = if n < 0 @@ -20,6 +22,35 @@ fun iterate f n x = if n < 0 then x else iterate f (n-1) (f x) +(* From the MLton wiki. *) +infix 3 <\ fun x <\ f = fn y => f (x, y) (* Left section *) +infix 3 \> fun f \> y = f y (* Left application *) + +fun mapFst f (x, y) = (f x, y) + +(* Option monad. *) +fun obind (x, f) = Option.mapPartial f x +fun oguard (b, x) = if b then x else NONE +fun omap f = fn SOME x => SOME (f x) | _ => NONE +fun omap2 f = fn (SOME x, SOME y) => SOME (f (x,y)) | _ => NONE +fun osequence ys = List.foldr (omap2 op::) (SOME []) ys + +fun indexOf test = + let + fun f n = + fn [] => NONE + | (x::xs) => if test x then SOME n else f (n+1) xs + in + f 0 + end + + +(************) +(* Settings *) +(************) + +open Mono + (* Filled in by [addFlushing]. *) val ffiInfoRef : {index : int, params : int} list ref = ref [] @@ -59,6 +90,11 @@ val alwaysConsolidateRef = ref true fun setAlwaysConsolidate b = alwaysConsolidateRef := b fun getAlwaysConsolidate () = !alwaysConsolidateRef + +(************************) +(* Really Useful Things *) +(************************) + (* Used to have type context for local variables in MonoUtil functions. *) val doBind = fn (env, MonoUtil.Exp.RelE (x, t)) => MonoEnv.pushERel env x t NONE @@ -79,36 +115,26 @@ fun obindDebug printer (x, f) = NONE => (printer (); NONE) | y => y -(*********************) -(* General Utilities *) -(*********************) - -(* From the MLton wiki. *) -infix 3 <\ fun x <\ f = fn y => f (x, y) (* Left section *) -infix 3 \> fun f \> y = f y (* Left application *) -fun mapFst f (x, y) = (f x, y) - -(* Option monad. *) -fun obind (x, f) = Option.mapPartial f x -fun oguard (b, x) = if b then x else NONE -fun omap f = fn SOME x => SOME (f x) | _ => NONE -fun omap2 f = fn (SOME x, SOME y) => SOME (f (x,y)) | _ => NONE -fun osequence ys = List.foldr (omap2 op::) (SOME []) ys +(*******************) +(* Effect Analysis *) +(*******************) -fun indexOf test = +(* TODO: test this. *) +fun transitiveAnalysis doVal state (decls, _) = let - fun f n = - fn [] => NONE - | (x::xs) => if test x then SOME n else f (n+1) xs + val doDecl = + fn ((DVal v, _), state) => doVal (v, state) + (* Pass over the list of values a number of times equal to its size, + making sure whatever property we're testing propagates everywhere + it should. This is analagous to the Bellman-Ford algorithm. *) + | ((DValRec vs, _), state) => + iterate (fn state => List.foldl doVal state vs) (length vs) state + | (_, state) => state in - f 0 + List.foldl doDecl state decls end -(*******************) -(* Effect Analysis *) -(*******************) - (* Makes an exception for [EWrite] (which is recorded when caching). *) fun effectful (effs : IS.set) = let @@ -151,24 +177,13 @@ fun effectful (effs : IS.set) = end (* TODO: test this. *) -fun effectfulDecls (decls, _) = - let - fun doVal ((_, name, _, e, _), effs) = - if effectful effs MonoEnv.empty e - then IS.add (effs, name) - else effs - val doDecl = - fn ((DVal v, _), effs) => doVal (v, effs) - (* Repeat the list of declarations a number of times equal to its size, - making sure effectfulness propagates everywhere it should. This is - analagous to the Bellman-Ford algorithm. *) - | ((DValRec vs, _), effs) => - List.foldl doVal effs (List.concat (List.map (fn _ => vs) vs)) - (* ASK: any other cases? 
*) - | (_, effs) => effs - in - List.foldl doDecl IS.empty decls - end +fun effectfulDecls file = + transitiveAnalysis (fn ((_, name, _, e, _), effs) => + if effectful effs MonoEnv.empty e + then IS.add (effs, name) + else effs) + IS.empty + file (*********************************) @@ -1080,9 +1095,7 @@ fun typOfExp' (env : MonoEnv.env) : exp' -> typ option = | ERecord fields => SOME (TRecord (map (fn (s, _, t) => (s, t)) fields), dummyLoc) | EField (e, s) => (case typOfExp env e of SOME (TRecord fields, _) => - (case List.find (fn (s', _) => s = s') fields of - SOME (_, t) => SOME t - | _ => NONE) + omap #2 (List.find (fn (s', _) => s = s') fields) | _ => NONE) | ECase (_, _, {result, ...}) => SOME result | EStrcat _ => SOME (TFfi ("Basis", "string"), dummyLoc) @@ -1414,6 +1427,46 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state end +(***********) +(* Locking *) +(***********) + +(* TODO: do this less evil-ly by not relying on specific FFI names, please? *) +fun locksNeeded file = + transitiveAnalysis + (fn ((_, name, _, e, _), state) => + MonoUtil.Exp.fold + {typ = #2, + exp = fn (EFfiApp ("Sqlcache", x, _), state as {store, flush}) => + (case Int.fromString (String.extract (x, 5, NONE)) of + NONE => raise Match + | SOME index => + if String.isPrefix "store" x + then {store = IIMM.insert (store, name, index), flush = flush} + else if String.isPrefix "flush" x + then {store = store, flush = IIMM.insert (flush, name, index)} + else state) + | _ => state} + state + e) + {store = IIMM.empty, flush = IIMM.empty} + file + +fun exports (decls, _) = + List.foldl (fn ((DExport (_, _, n, _, _, _), _), ns) => IS.add (ns, n) + | ((DTask _, _), _) => raise Fail "Sqlcache doesn't yet support tasks." + | (_, ns) => ns) + IS.empty + decls + +(* fun addLocking file = *) +(* let *) +(* val whichLocks = locksNeeded file *) +(* val needsLocks = exports file *) +(* in *) + +(* end *) + (************************) (* Compiler Entry Point *) (************************) diff --git a/src/toy_cache.sml b/src/toy_cache.sml index 377cae01..5c5aa459 100644 --- a/src/toy_cache.sml +++ b/src/toy_cache.sml @@ -24,6 +24,9 @@ fun store (index, keys, value) = fun flush (index, keys) = ffiAppCache' ("flush", index, withTyp optionStringTyp keys) +fun lock (index, keys) = + raise Fail "ToyCache doesn't yet implement lock" + (* Cjr *) @@ -198,7 +201,7 @@ val setupGlobal = string "/* No global setup for toy cache. */" (* Bundled up. *) -val cache = {check = check, store = store, flush = flush, +val cache = {check = check, store = store, flush = flush, lock = lock, setupQuery = setupQuery, setupGlobal = setupGlobal} end -- cgit v1.2.3 From d67e2a35789c5e4c7ad603c15d2acdc826fcdc76 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Fri, 13 Nov 2015 01:05:22 -0500 Subject: Remove debugging print statement. --- src/c/urweb.c | 1 - 1 file changed, 1 deletion(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 02e17a0b..778adacc 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4713,7 +4713,6 @@ void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { time_t timeNow = uw_Sqlcache_getTimeNow(cache); uw_Sqlcache_Entry *entry; if (numKeys == 0) { - puts("flush cache of height 0"); entry = cache->table; if (entry) { uw_Sqlcache_freeValue(entry->value); -- cgit v1.2.3 From bad52a2868ff0551ac0199fd8124f81f9623391e Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Fri, 13 Nov 2015 11:03:09 -0500 Subject: Finish locking, but it's not yet tested rigorously. 
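The locking scheme completed here uses two reader-writer locks per cache. The generated entry-point code takes lockOut once per transaction (read mode for handlers that use the cache without flushing it, write mode for handlers that may flush it) and releases it only at commit time through the unlock list, while lockIn guards the hash table just for the duration of a single check, store, or flush. Below is a minimal sketch of that discipline, assuming POSIX rwlocks; the names are simplified and the commit-time unlock is shown inline rather than deferred through uw_register_transactional.

#include <pthread.h>
#include <stdio.h>

/* Two-lock discipline, simplified: lockOut has transaction scope,
 * lockIn has per-operation scope. */
typedef struct {
  pthread_rwlock_t lockOut;
  pthread_rwlock_t lockIn;
  int entries;                 /* stand-in for the real hash table */
} Cache;

static Cache cache = {
  .lockOut = PTHREAD_RWLOCK_INITIALIZER,
  .lockIn  = PTHREAD_RWLOCK_INITIALIZER,
  .entries = 0
};

static void readOnlyTransaction(void) {
  pthread_rwlock_rdlock(&cache.lockOut);   /* like uw_Sqlcache_rlock */
  pthread_rwlock_rdlock(&cache.lockIn);    /* one cache check */
  printf("checked cache, %d entries\n", cache.entries);
  pthread_rwlock_unlock(&cache.lockIn);
  /* ... rest of the handler runs with lockOut still held ... */
  pthread_rwlock_unlock(&cache.lockOut);   /* done at commit */
}

static void flushingTransaction(void) {
  pthread_rwlock_wrlock(&cache.lockOut);   /* like uw_Sqlcache_wlock */
  pthread_rwlock_wrlock(&cache.lockIn);    /* apply one flush */
  cache.entries = 0;
  pthread_rwlock_unlock(&cache.lockIn);
  pthread_rwlock_unlock(&cache.lockOut);   /* commit */
}

int main(void) {
  flushingTransaction();
  readOnlyTransaction();
  return 0;
}

In the real runtime the outer unlocks are pushed onto ctx->cacheUnlock and performed by the free callback registered with uw_register_transactional, so they happen after the queued updates are applied.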
--- include/urweb/types_cpp.h | 3 +- include/urweb/urweb_cpp.h | 2 + src/c/urweb.c | 143 ++++++++++++++++++++++++++++++---------------- src/lru_cache.sml | 20 ++++++- src/sqlcache.sml | 51 ++++++++++++----- 5 files changed, 154 insertions(+), 65 deletions(-) (limited to 'src') diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h index 82f8d30a..ce0f2825 100644 --- a/include/urweb/types_cpp.h +++ b/include/urweb/types_cpp.h @@ -133,7 +133,8 @@ typedef struct uw_Sqlcache_Value { typedef struct uw_Sqlcache_Entry uw_Sqlcache_Entry; typedef struct uw_Sqlcache_Cache { - pthread_rwlock_t lock; + pthread_rwlock_t lockOut; + pthread_rwlock_t lockIn; uw_Sqlcache_Entry *table; unsigned long timeInvalid; unsigned long timeNow; diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h index 2c032e7b..916fbbf9 100644 --- a/include/urweb/urweb_cpp.h +++ b/include/urweb/urweb_cpp.h @@ -406,6 +406,8 @@ void uw_Basis_writec(struct uw_context *, char); // Sqlcache. +void *uw_Sqlcache_rlock(struct uw_context *, uw_Sqlcache_Cache *); +void *uw_Sqlcache_wlock(struct uw_context *, uw_Sqlcache_Cache *); uw_Sqlcache_Value *uw_Sqlcache_check(struct uw_context *, uw_Sqlcache_Cache *, char **); void *uw_Sqlcache_store(struct uw_context *, uw_Sqlcache_Cache *, char **, uw_Sqlcache_Value *); void *uw_Sqlcache_flush(struct uw_context *, uw_Sqlcache_Cache *, char **); diff --git a/src/c/urweb.c b/src/c/urweb.c index 778adacc..6a48e95e 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -366,6 +366,9 @@ void uw_global_init() { uw_global_custom(); uw_init_crypto(); + + // Fast non-cryptographic strength randomness for Sqlcache. + srandom(clock()); } void uw_app_init(uw_app *app) { @@ -431,6 +434,11 @@ typedef struct uw_Sqlcache_Update { struct uw_Sqlcache_Update *next; } uw_Sqlcache_Update; +typedef struct uw_Sqlcache_Unlock { + pthread_rwlock_t *lock; + struct uw_Sqlcache_Unlock *next; +} uw_Sqlcache_Unlock; + struct uw_context { uw_app *app; int id; @@ -500,6 +508,7 @@ struct uw_context { int recordingOffset; uw_Sqlcache_Update *cacheUpdate; uw_Sqlcache_Update *cacheUpdateTail; + uw_Sqlcache_Unlock *cacheUnlock; int remoteSock; }; @@ -4556,7 +4565,7 @@ typedef struct uw_Sqlcache_Entry { UT_hash_handle hh; } uw_Sqlcache_Entry; -void uw_Sqlcache_freeValue(uw_Sqlcache_Value *value) { +static void uw_Sqlcache_freeValue(uw_Sqlcache_Value *value) { if (value) { free(value->result); free(value->output); @@ -4564,7 +4573,7 @@ void uw_Sqlcache_freeValue(uw_Sqlcache_Value *value) { } } -void uw_Sqlcache_freeEntry(uw_Sqlcache_Entry* entry) { +static void uw_Sqlcache_freeEntry(uw_Sqlcache_Entry* entry) { if (entry) { free(entry->key); uw_Sqlcache_freeValue(entry->value); @@ -4573,14 +4582,14 @@ void uw_Sqlcache_freeEntry(uw_Sqlcache_Entry* entry) { } // TODO: pick a number. 
-unsigned int uw_Sqlcache_maxSize = 1234567890; +static unsigned int uw_Sqlcache_maxSize = 1234567890; -void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry) { +static void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry) { HASH_DEL(cache->table, entry); uw_Sqlcache_freeEntry(entry); } -uw_Sqlcache_Entry *uw_Sqlcache_find(uw_Sqlcache_Cache *cache, char *key, size_t len, int bump) { +static uw_Sqlcache_Entry *uw_Sqlcache_find(uw_Sqlcache_Cache *cache, char *key, size_t len, int bump) { uw_Sqlcache_Entry *entry = NULL; HASH_FIND(hh, cache->table, key, len, entry); if (entry && bump) { @@ -4592,7 +4601,7 @@ uw_Sqlcache_Entry *uw_Sqlcache_find(uw_Sqlcache_Cache *cache, char *key, size_t return entry; } -void uw_Sqlcache_add(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry, size_t len) { +static void uw_Sqlcache_add(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry, size_t len) { HASH_ADD_KEYPTR(hh, cache->table, entry->key, len, entry); if (HASH_COUNT(cache->table) > uw_Sqlcache_maxSize) { // Deletes the first element of the cache. @@ -4600,17 +4609,17 @@ void uw_Sqlcache_add(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry, size_t } } -unsigned long uw_Sqlcache_getTimeNow(uw_Sqlcache_Cache *cache) { +static unsigned long uw_Sqlcache_getTimeNow(uw_Sqlcache_Cache *cache) { return ++cache->timeNow; } -unsigned long uw_Sqlcache_timeMax(unsigned long x, unsigned long y) { +static unsigned long uw_Sqlcache_timeMax(unsigned long x, unsigned long y) { return x > y ? x : y; } -char uw_Sqlcache_keySep = '_'; +static char uw_Sqlcache_keySep = '_'; -char *uw_Sqlcache_allocKeyBuffer(char **keys, size_t numKeys) { +static char *uw_Sqlcache_allocKeyBuffer(char **keys, size_t numKeys) { size_t len = 0; while (numKeys-- > 0) { char* k = keys[numKeys]; @@ -4627,7 +4636,7 @@ char *uw_Sqlcache_allocKeyBuffer(char **keys, size_t numKeys) { return buf; } -char *uw_Sqlcache_keyCopy(char *buf, char *key) { +static char *uw_Sqlcache_keyCopy(char *buf, char *key) { *buf++ = uw_Sqlcache_keySep; return stpcpy(buf, key); } @@ -4635,7 +4644,12 @@ char *uw_Sqlcache_keyCopy(char *buf, char *key) { // The NUL-terminated prefix of [key] below always looks something like "_k1_k2_k3..._kn". uw_Sqlcache_Value *uw_Sqlcache_check(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { - pthread_rwlock_rdlock(&cache->lock); + int doBump = random() % 1024 == 0; + if (doBump) { + pthread_rwlock_wrlock(&cache->lockIn); + } else { + pthread_rwlock_rdlock(&cache->lockIn); + } size_t numKeys = cache->numKeys; char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); char *buf = key; @@ -4645,46 +4659,49 @@ uw_Sqlcache_Value *uw_Sqlcache_check(uw_context ctx, uw_Sqlcache_Cache *cache, c entry = cache->table; if (!entry) { free(key); - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); return NULL; } } else { while (numKeys-- > 0) { buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); size_t len = buf - key; - entry = uw_Sqlcache_find(cache, key, len, 1); + entry = uw_Sqlcache_find(cache, key, len, doBump); if (!entry) { free(key); - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); return NULL; } timeInvalid = uw_Sqlcache_timeMax(timeInvalid, entry->timeInvalid); } free(key); } - // TODO: pass back copy of value and free it in the generated code... or use uw_malloc? 
uw_Sqlcache_Value *value = entry->value; - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); + // ASK: though the argument isn't trivial, this is safe, right? + // Returning outside the lock is safe because updates happen at commit time. + // Those are the only times the returned value or its strings can get freed. + // Handler output is a new string, so it's safe to free this at commit time. return value && value->timeValid > timeInvalid ? value : NULL; } -void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { - pthread_rwlock_wrlock(&cache->lock); +static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) { + pthread_rwlock_wrlock(&cache->lockIn); size_t numKeys = cache->numKeys; - char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); - char *buf = key; time_t timeNow = uw_Sqlcache_getTimeNow(cache); uw_Sqlcache_Entry *entry; if (numKeys == 0) { entry = cache->table; if (!entry) { entry = malloc(sizeof(uw_Sqlcache_Entry)); - entry->key = strdup(key); + entry->key = NULL; entry->value = NULL; entry->timeInvalid = 0; cache->table = entry; } } else { + char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); + char *buf = key; while (numKeys-- > 0) { buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); size_t len = buf - key; @@ -4702,23 +4719,23 @@ void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcac uw_Sqlcache_freeValue(entry->value); entry->value = value; entry->value->timeValid = timeNow; - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); } -void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { - pthread_rwlock_wrlock(&cache->lock); +static void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { + pthread_rwlock_wrlock(&cache->lockIn); size_t numKeys = cache->numKeys; - char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); - char *buf = key; - time_t timeNow = uw_Sqlcache_getTimeNow(cache); - uw_Sqlcache_Entry *entry; if (numKeys == 0) { - entry = cache->table; + uw_Sqlcache_Entry *entry = cache->table; if (entry) { uw_Sqlcache_freeValue(entry->value); entry->value = NULL; } } else { + char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); + char *buf = key; + time_t timeNow = uw_Sqlcache_getTimeNow(cache); + uw_Sqlcache_Entry *entry = NULL; while (numKeys-- > 0) { char *k = keys[numKeys]; if (!k) { @@ -4729,15 +4746,16 @@ void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { cache->timeInvalid = timeNow; } free(key); - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); return; } buf = uw_Sqlcache_keyCopy(buf, k); size_t len = buf - key; entry = uw_Sqlcache_find(cache, key, len, 0); if (!entry) { + // Nothing in the cache to flush. free(key); - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); return; } } @@ -4745,10 +4763,25 @@ void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { // All the keys were non-null and the relevant entry is present, so we delete it. 
uw_Sqlcache_delete(cache, entry); } - pthread_rwlock_unlock(&cache->lock); + pthread_rwlock_unlock(&cache->lockIn); +} + +static void uw_Sqlcache_commit(void *data) { + uw_context ctx = (uw_context)data; + uw_Sqlcache_Update *update = ctx->cacheUpdate; + while (update) { + uw_Sqlcache_Cache *cache = update->cache; + char **keys = update->keys; + if (update->value) { + uw_Sqlcache_storeCommitOne(cache, keys, update->value); + } else { + uw_Sqlcache_flushCommitOne(cache, keys); + } + update = update->next; + } } -void uw_Sqlcache_freeUpdate(void *data, int dontCare) { +static void uw_Sqlcache_free(void *data, int dontCare) { uw_context ctx = (uw_context)data; uw_Sqlcache_Update *update = ctx->cacheUpdate; while (update) { @@ -4765,24 +4798,38 @@ void uw_Sqlcache_freeUpdate(void *data, int dontCare) { } ctx->cacheUpdate = NULL; ctx->cacheUpdateTail = NULL; + uw_Sqlcache_Unlock *unlock = ctx->cacheUnlock; + while (unlock) { + pthread_rwlock_unlock(unlock->lock); + uw_Sqlcache_Unlock *nextUnlock = unlock->next; + free(unlock); + unlock = nextUnlock; + } + ctx->cacheUnlock = NULL; } -void uw_Sqlcache_commitUpdate(void *data) { - uw_context ctx = (uw_context)data; - uw_Sqlcache_Update *update = ctx->cacheUpdate; - while (update) { - uw_Sqlcache_Cache *cache = update->cache; - char **keys = update->keys; - if (update->value) { - uw_Sqlcache_storeCommitOne(cache, keys, update->value); - } else { - uw_Sqlcache_flushCommitOne(cache, keys); - } - update = update->next; +static void uw_Sqlcache_pushUnlock(uw_context ctx, pthread_rwlock_t *lock) { + if (!ctx->cacheUnlock) { + // Just need one registered commit for both updating and unlocking. + uw_register_transactional(ctx, ctx, uw_Sqlcache_commit, NULL, uw_Sqlcache_free); } + uw_Sqlcache_Unlock *unlock = malloc(sizeof(uw_Sqlcache_Unlock)); + unlock->lock = lock; + unlock->next = ctx->cacheUnlock; + ctx->cacheUnlock = unlock; +} + +void uw_Sqlcache_rlock(uw_context ctx, uw_Sqlcache_Cache *cache) { + pthread_rwlock_rdlock(&cache->lockOut); + uw_Sqlcache_pushUnlock(ctx, &cache->lockOut); +} + +void uw_Sqlcache_wlock(uw_context ctx, uw_Sqlcache_Cache *cache) { + pthread_rwlock_wrlock(&cache->lockOut); + uw_Sqlcache_pushUnlock(ctx, &cache->lockOut); } -char **uw_Sqlcache_copyKeys(char **keys, size_t numKeys) { +static char **uw_Sqlcache_copyKeys(char **keys, size_t numKeys) { char **copy = malloc(sizeof(char *) * numKeys); while (numKeys-- > 0) { char *k = keys[numKeys]; @@ -4798,11 +4845,9 @@ void uw_Sqlcache_store(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys, uw update->value = value; update->next = NULL; if (ctx->cacheUpdateTail) { - // An update is already registered, so just extend it. 
ctx->cacheUpdateTail->next = update; } else { ctx->cacheUpdate = update; - uw_register_transactional(ctx, ctx, uw_Sqlcache_commitUpdate, NULL, uw_Sqlcache_freeUpdate); } ctx->cacheUpdateTail = update; } diff --git a/src/lru_cache.sml b/src/lru_cache.sml index b66becb7..0276de91 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -69,7 +69,9 @@ fun setupQuery {index, params} = Print.box [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), newline, - string " .lock = PTHREAD_RWLOCK_INITIALIZER,", + string " .lockIn = PTHREAD_RWLOCK_INITIALIZER,", + newline, + string " .lockOut = PTHREAD_RWLOCK_INITIALIZER,", newline, string " .table = NULL,", newline, @@ -83,6 +85,22 @@ fun setupQuery {index, params} = newline, newline, + string ("static void uw_Sqlcache_rlock" ^ i ^ "(uw_context ctx) {"), + newline, + string (" uw_Sqlcache_rlock(ctx, cache" ^ i ^ ");"), + newline, + string "}", + newline, + newline, + + string ("static void uw_Sqlcache_wlock" ^ i ^ "(uw_context ctx) {"), + newline, + string (" uw_Sqlcache_wlock(ctx, cache" ^ i ^ ");"), + newline, + string "}", + newline, + newline, + string ("static uw_Basis_string uw_Sqlcache_check" ^ i), string ("(uw_context ctx" ^ typedArgs ^ ") {"), newline, diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 2b3b80ae..6583dc91 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -913,7 +913,7 @@ val conflictMaps = ConflictMaps.conflictMaps (* Program Instrumentation Utilities *) (*************************************) -val {check, store, flush, ...} = getCache () +val {check, store, flush, lock, ...} = getCache () val dummyTyp = (TRecord [], dummyLoc) @@ -1431,7 +1431,7 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state (* Locking *) (***********) -(* TODO: do this less evil-ly by not relying on specific FFI names, please? *) +(* TODO: do this less evilly by not relying on specific FFI names, please? 
*) fun locksNeeded file = transitiveAnalysis (fn ((_, name, _, e, _), state) => @@ -1439,14 +1439,14 @@ fun locksNeeded file = {typ = #2, exp = fn (EFfiApp ("Sqlcache", x, _), state as {store, flush}) => (case Int.fromString (String.extract (x, 5, NONE)) of - NONE => raise Match + NONE => state | SOME index => - if String.isPrefix "store" x - then {store = IIMM.insert (store, name, index), flush = flush} - else if String.isPrefix "flush" x + if String.isPrefix "flush" x then {store = store, flush = IIMM.insert (flush, name, index)} + else if String.isPrefix "store" x + then {store = IIMM.insert (store, name, index), flush = flush} else state) - | _ => state} + | (_, state) => state} state e) {store = IIMM.empty, flush = IIMM.empty} @@ -1459,13 +1459,36 @@ fun exports (decls, _) = IS.empty decls -(* fun addLocking file = *) -(* let *) -(* val whichLocks = locksNeeded file *) -(* val needsLocks = exports file *) -(* in *) +fun wrapLocks (locks, (exp', loc)) = + case exp' of + EAbs (s, t1, t2, exp) => (EAbs (s, t1, t2, wrapLocks (locks, exp)), loc) + | _ => (List.foldr (fn (l, e') => sequence [lock l, e']) exp' locks, loc) + +fun addLocking file = + let + val {store, flush} = locksNeeded file + fun locks n = + let + val wlocks = IIMM.findSet (flush, n) + val rlocks = IIMM.findSet (store, n) + val ls = map (fn i => (i, true)) (IS.listItems wlocks) + @ map (fn i => (i, false)) (IS.listItems (IS.difference (rlocks, wlocks))) + in + ListMergeSort.sort (fn ((i, _), (j, _)) => i > j) ls + end + val expts = exports file + fun doVal (v as (x, n, t, exp, s)) = + if IS.member (expts, n) + then (x, n, t, wrapLocks ((locks n), exp), s) + else v + val doDecl = + fn (DVal v, loc) => (DVal (doVal v), loc) + | (DValRec vs, loc) => (DValRec (map doVal vs), loc) + | decl => decl + in + mapFst (map doDecl) file + end -(* end *) (************************) (* Compiler Entry Point *) @@ -1494,7 +1517,7 @@ fun insertAfterDatatypes ((decls, sideInfo), newDecls) = (datatypes @ newDecls @ others, sideInfo) end -val go' = addFlushing o addCaching o simplifySql o inlineSql +val go' = addLocking o addFlushing o addCaching o simplifySql o inlineSql fun go file = let -- cgit v1.2.3 From ed7b5e6f956c5b13735cc3e5c4de01fbfc437e12 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Sun, 15 Nov 2015 14:18:35 -0500 Subject: Fix bugs for lock calculation and SQL parsing and add support for tasks. --- caching-tests/test.urp | 2 +- src/lru_cache.sml | 12 ++--- src/sqlcache.sml | 126 +++++++++++++++++++++++++++++++------------------ 3 files changed, 87 insertions(+), 53 deletions(-) (limited to 'src') diff --git a/caching-tests/test.urp b/caching-tests/test.urp index cea8821e..dd8cf774 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -1,4 +1,4 @@ -database test.db +database host=localhost dbname=ziv sql test.sql safeGet Test/flush safeGet Test/flash diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 0276de91..e9ed5f73 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -111,16 +111,16 @@ fun setupQuery {index, params} = (* If the output is null, it means we had too much recursion, so it's a miss. 
*) string " if (v && v->output != NULL) {", newline, - string (" puts(\"SQLCACHE: hit " ^ i ^ ".\");"), - newline, + (* string (" puts(\"SQLCACHE: hit " ^ i ^ ".\");"), *) + (* newline, *) string " uw_write(ctx, v->output);", newline, string " return v->result;", newline, string " } else {", newline, - string (" puts(\"SQLCACHE: miss " ^ i ^ ".\");"), - newline, + (* string (" puts(\"SQLCACHE: miss " ^ i ^ ".\");"), *) + (* newline, *) string " uw_recordingStart(ctx);", newline, string " return NULL;", @@ -142,8 +142,8 @@ fun setupQuery {index, params} = newline, string " v->output = uw_recordingRead(ctx);", newline, - string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), - newline, + (* string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), *) + (* newline, *) string (" uw_Sqlcache_store(ctx, cache" ^ i ^ ", ks, v);"), newline, string " return uw_unit_v;", diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 6583dc91..481acbeb 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache :> SQLCACHE = struct +structure Sqlcache (* DEBUG :> SQLCACHE *) = struct (*********************) @@ -312,7 +312,9 @@ fun removeRedundant madeRedundantBy zs = end datatype atomExp = - QueryArg of int + True + | False + | QueryArg of int | DmlRel of int | Prim of Prim.t | Field of string * string @@ -322,7 +324,13 @@ structure AtomExpKey : ORD_KEY = struct type ord_key = atomExp val compare = - fn (QueryArg n1, QueryArg n2) => Int.compare (n1, n2) + fn (True, True) => EQUAL + | (True, _) => LESS + | (_, True) => GREATER + | (False, False) => EQUAL + | (False, _) => LESS + | (_, False) => GREATER + | (QueryArg n1, QueryArg n2) => Int.compare (n1, n2) | (QueryArg _, _) => LESS | (_, QueryArg _) => GREATER | (DmlRel n1, DmlRel n2) => Int.compare (n1, n2) @@ -531,7 +539,7 @@ end = struct project from a sqlified value (which is a string). *) | (_, sq as SOME _, [], NONE) => wrap sq - | _ => raise Match + | _ => raise Fail "Sqlcache: traverseSubst" end) (f n) in @@ -620,7 +628,7 @@ end = struct AM.find (argsMap, arg) <\obind\> (fn n' => SOME (ERel n'))) - | _ => raise Match + | _ => raise Fail "Sqlcache: query (a)" in case (map #1 qs) of (q :: qs) => @@ -629,16 +637,16 @@ end = struct val ns = IS.listItems (varsOfQuery q) val rename = fn ERel n => omap ERel (indexOf (fn n' => n' = n) ns) - | _ => raise Match + | _ => raise Fail "Sqlcache: query (b)" in case omapQuery rename q of SOME q => q (* We should never get NONE because indexOf should never fail. *) - | NONE => raise Match + | NONE => raise Fail "Sqlcache: query (c)" end (* We should never reach this case because [updateState] won't put anything in the state if there are no queries. *) - | [] => raise Match + | [] => raise Fail "Sqlcache: query (d)" end val argOfExp = @@ -700,8 +708,23 @@ val rec sqexpToFormula = | Sql.Binop (Sql.RCmp c, e1, e2) => Atom (c, e1, e2) | Sql.Binop (Sql.RLop l, p1, p2) => Combo (case l of Sql.And => Conj | Sql.Or => Disj, [sqexpToFormula p1, sqexpToFormula p2]) + | e as Sql.Field f => Atom (Sql.Eq, e, Sql.SqTrue) (* ASK: any other sqexps that can be props? 
*) - | _ => raise Match + | Sql.SqConst prim => + (case prim of + (Prim.String (Prim.Normal, s)) => + if s = #trueString (Settings.currentDbms ()) + then Combo (Conj, []) + else if s = #falseString (Settings.currentDbms ()) + then Combo (Disj, []) + else raise Fail "Sqlcache: sqexpToFormula (SqConst a)" + | _ => raise Fail "Sqlcache: sqexpToFormula (SqConst b)") + | Sql.Computed _ => raise Fail "Sqlcache: sqexpToFormula (Computed)" + | Sql.SqKnown _ => raise Fail "Sqlcache: sqexpToFormula (SqKnown)" + | Sql.Inj _ => raise Fail "Sqlcache: sqexpToFormula (Inj)" + | Sql.SqFunc _ => raise Fail "Sqlcache: sqexpToFormula (SqFunc)" + | Sql.Unmodeled => raise Fail "Sqlcache: sqexpToFormula (Unmodeled)" + | Sql.Null => raise Fail "Sqlcache: sqexpToFormula (Null)" fun mapSqexpFields f = fn Sql.Field (t, v) => f (t, v) @@ -799,9 +822,6 @@ structure ConflictMaps = struct fun equivClasses atoms : atomExp list list option = let val uf = List.foldl UF.union' UF.empty (List.mapPartial toKnownEquality atoms) - val ineqs = List.filter (fn (cmp, _, _) => - cmp = Sql.Ne orelse cmp = Sql.Lt orelse cmp = Sql.Gt) - atoms val contradiction = fn (cmp, SOME ae1, SOME ae2) => (cmp = Sql.Ne orelse cmp = Sql.Lt orelse cmp = Sql.Gt) andalso UF.together (uf, ae1, ae2) @@ -928,7 +948,7 @@ val sequence = in List.foldl (fn (e', seq) => ESeq ((seq, loc), (e', loc))) exp exps end - | _ => raise Match + | _ => raise Fail "Sqlcache: sequence" (* Always increments negative indices as a hack we use later. *) fun incRels inc = @@ -983,7 +1003,7 @@ fun fileAllMapfoldB doExp file start = bind = doBind} MonoEnv.empty file start of Search.Continue x => x - | Search.Return _ => raise Match + | Search.Return _ => raise Fail "Sqlcache: fileAllMapfoldB" fun fileMap doExp file = #1 (fileAllMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) @@ -1029,7 +1049,7 @@ val simplifySql = val text = case exp' of EQuery {query = text, ...} => text | EDml (text, _) => text - | _ => raise Match + | _ => raise Fail "Sqlcache: simplifySql (a)" val (newText, wrapLets, numArgs) = factorOutNontrivial text val newExp' = case exp' of EQuery q => EQuery {query = newText, @@ -1039,7 +1059,7 @@ val simplifySql = body = #body q, initial = #initial q} | EDml (_, failureMode) => EDml (newText, failureMode) - | _ => raise Match + | _ => raise Fail "Sqlcache: simplifySql (b)" in (* Increment once for each new variable just made. This is where we use the negative De Bruijn indices hack. 
*) @@ -1128,7 +1148,7 @@ val runSubexp : subexp * state -> exp * state = val invalInfoOfSubexp = fn Cachable (invalInfo, _) => invalInfo - | Impure _ => raise Match + | Impure _ => raise Fail "Sqlcache: invalInfoOfSubexp" fun cacheWrap (env, exp, typ, args, index) = let @@ -1275,9 +1295,11 @@ fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = | NONE => mapFst Impure (mkExp state) end fun wrapBind1 f arg = - wrapBindN (fn [arg] => f arg | _ => raise Match) [arg] + wrapBindN (fn [arg] => f arg + | _ => raise Fail "Sqlcache: cacheTree (a)") [arg] fun wrapBind2 f (arg1, arg2) = - wrapBindN (fn [arg1, arg2] => f (arg1, arg2) | _ => raise Match) [arg1, arg2] + wrapBindN (fn [arg1, arg2] => f (arg1, arg2) + | _ => raise Fail "Sqlcache: cacheTree (b)") [arg1, arg2] fun wrapN f es = wrapBindN f (map (fn e => ((env, e), Unknowns 0)) es) fun wrap1 f e = wrapBind1 f ((env, e), Unknowns 0) fun wrap2 f (e1, e2) = wrapBind2 f (((env, e1), Unknowns 0), ((env, e2), Unknowns 0)) @@ -1306,7 +1328,7 @@ fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = ECase (e, (ListPair.map (fn (e, (p, _)) => (p, e)) (es, cases)), {disc = disc, result = result}) - | _ => raise Match) + | _ => raise Fail "Sqlcache: cacheTree (c)") (((env, e), Unknowns 0) :: map (fn (p, e) => ((MonoEnv.patBinds env p, e), Unknowns (MonoEnv.patBindsN p))) @@ -1362,7 +1384,7 @@ structure Invalidations = struct DmlRel n => ERel n | Prim p => EPrim p (* TODO: make new type containing only these two. *) - | _ => raise Match, + | _ => raise Fail "Sqlcache: optionAtomExpToExp", loc)), loc) @@ -1409,13 +1431,13 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state (i, invalidations (invalInfo, dmlParsed)) (* TODO: fail more gracefully. *) (* This probably means invalidating everything.... *) - | NONE => raise Match)) + | NONE => raise Fail "Sqlcache: addFlushing (a)")) (SIMM.findList (tableToIndices, tableOfDml dmlParsed))) | NONE => NONE in case inval of (* TODO: fail more gracefully. *) - NONE => raise Match + NONE => raise Fail "Sqlcache: addFlushing (b)" | SOME invs => sequence (flushes invs @ [dmlExp]) end | e' => e' @@ -1432,29 +1454,38 @@ fun addFlushing ((file, {tableToIndices, indexToInvalInfo, ffiInfo, ...} : state (***********) (* TODO: do this less evilly by not relying on specific FFI names, please? 
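Concretely, the evil is that the cache primitives are exposed to the rest of the compiler as FFI calls whose names are "store" or "flush" followed by the cache index, and locksNeeded below recovers the index by peeling off the first five characters. A small illustration of that naming convention; cacheOp and classify are hypothetical names, but the string handling mirrors the real code, including its assumption that the name carries an index after a five-character prefix.

datatype cacheOp = Store of int | Flush of int | Other

fun classify (x : string) : cacheOp =
    case (Int.fromString (String.extract (x, 5, NONE)) handle Subscript => NONE) of
        NONE => Other
      | SOME index =>
        if String.isPrefix "store" x then Store index
        else if String.isPrefix "flush" x then Flush index
        else Other

(* classify "store3"  evaluates to Store 3
   classify "flush12" evaluates to Flush 12
   classify "rand"    evaluates to Other *)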
*) -fun locksNeeded file = +fun locksNeeded (lockMap : {store : IIMM.multimap, flush : IIMM.multimap}) = + MonoUtil.Exp.fold + {typ = #2, + exp = fn (EFfiApp ("Sqlcache", x, _), state as {store, flush}) => + (case Int.fromString (String.extract (x, 5, NONE)) of + NONE => state + | SOME index => + if String.isPrefix "flush" x + then {store = store, flush = IS.add (flush, index)} + else if String.isPrefix "store" x + then {store = IS.add (store, index), flush = flush} + else state) + | (ENamed n, {store, flush}) => + {store = IS.union (store, IIMM.findSet (#store lockMap, n)), + flush = IS.union (flush, IIMM.findSet (#flush lockMap, n))} + | (_, state) => state} + {store = IS.empty, flush = IS.empty} + +fun lockMapOfFile file = transitiveAnalysis (fn ((_, name, _, e, _), state) => - MonoUtil.Exp.fold - {typ = #2, - exp = fn (EFfiApp ("Sqlcache", x, _), state as {store, flush}) => - (case Int.fromString (String.extract (x, 5, NONE)) of - NONE => state - | SOME index => - if String.isPrefix "flush" x - then {store = store, flush = IIMM.insert (flush, name, index)} - else if String.isPrefix "store" x - then {store = IIMM.insert (store, name, index), flush = flush} - else state) - | (_, state) => state} - state - e) + let + val locks = locksNeeded state e + in + {store = IIMM.insertSet (#store state, name, #store locks), + flush = IIMM.insertSet (#flush state, name, #flush locks)} + end) {store = IIMM.empty, flush = IIMM.empty} file fun exports (decls, _) = List.foldl (fn ((DExport (_, _, n, _, _, _), _), ns) => IS.add (ns, n) - | ((DTask _, _), _) => raise Fail "Sqlcache doesn't yet support tasks." | (_, ns) => ns) IS.empty decls @@ -1466,24 +1497,27 @@ fun wrapLocks (locks, (exp', loc)) = fun addLocking file = let - val {store, flush} = locksNeeded file - fun locks n = + val lockMap = lockMapOfFile file + fun lockList {store, flush} = let - val wlocks = IIMM.findSet (flush, n) - val rlocks = IIMM.findSet (store, n) - val ls = map (fn i => (i, true)) (IS.listItems wlocks) - @ map (fn i => (i, false)) (IS.listItems (IS.difference (rlocks, wlocks))) + val ls = map (fn i => (i, true)) (IS.listItems flush) + @ map (fn i => (i, false)) (IS.listItems (IS.difference (store, flush))) in ListMergeSort.sort (fn ((i, _), (j, _)) => i > j) ls end + fun locksOfName n = + lockList {store = IIMM.findSet (#flush lockMap, n), + flush =IIMM.findSet (#store lockMap, n)} + val locksOfExp = lockList o locksNeeded lockMap val expts = exports file fun doVal (v as (x, n, t, exp, s)) = if IS.member (expts, n) - then (x, n, t, wrapLocks ((locks n), exp), s) + then (x, n, t, wrapLocks ((locksOfName n), exp), s) else v val doDecl = fn (DVal v, loc) => (DVal (doVal v), loc) | (DValRec vs, loc) => (DValRec (map doVal vs), loc) + | (DTask (exp1, exp2), loc) => (DTask (exp1, wrapLocks (locksOfExp exp2, exp2)), loc) | decl => decl in mapFst (map doDecl) file -- cgit v1.2.3 From 39804bcf37a35ca6a2cb5e49849ce9453c9025bc Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Tue, 17 Nov 2015 02:44:37 -0500 Subject: Make cache flushes happen immediately instead of at end of transaction. 
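The invariant this change leans on is easiest to see with the timestamps pulled out of the C: a flush now takes effect immediately, under the cache's write lock, by recording the current time as an invalidation time (or deleting the exact entry when every key is known), while stores stay queued until commit, and uw_Sqlcache_check only ever serves a value whose validity timestamp is strictly newer than the relevant invalidation time. A rough Standard ML model of that discipline; all names here are made up, and the real logic is in uw_Sqlcache_check, uw_Sqlcache_store, and uw_Sqlcache_flush in the diff below.

type cache = {timeNow : int, timeInvalid : int}
type value = {timeValid : int}

(* uw_Sqlcache_check: serve only values strictly newer than the last flush. *)
fun check (c : cache, v : value) : bool =
    #timeInvalid c < #timeValid v

(* uw_Sqlcache_flush: immediate; record "now" as the invalidation time and
   advance the clock. *)
fun flush (c : cache) : cache =
    {timeNow = #timeNow c + 1, timeInvalid = #timeNow c}

(* Stores remain queued until commit, so a transaction that flushes and then
   aborts, or races with another store, can at worst leave behind values that
   check refuses to serve: the "few extra cache misses" the new comment below
   mentions, rather than a stale hit. *)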
--- src/c/urweb.c | 90 ++++++++++++++++++++++++++++---------------------------- src/sqlcache.sml | 2 +- 2 files changed, 46 insertions(+), 46 deletions(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 6a48e95e..c1cfe94c 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4585,8 +4585,10 @@ static void uw_Sqlcache_freeEntry(uw_Sqlcache_Entry* entry) { static unsigned int uw_Sqlcache_maxSize = 1234567890; static void uw_Sqlcache_delete(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry) { - HASH_DEL(cache->table, entry); - uw_Sqlcache_freeEntry(entry); + if (entry) { + HASH_DEL(cache->table, entry); + uw_Sqlcache_freeEntry(entry); + } } static uw_Sqlcache_Entry *uw_Sqlcache_find(uw_Sqlcache_Cache *cache, char *key, size_t len, int bump) { @@ -4723,47 +4725,6 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw } static void uw_Sqlcache_flushCommitOne(uw_Sqlcache_Cache *cache, char **keys) { - pthread_rwlock_wrlock(&cache->lockIn); - size_t numKeys = cache->numKeys; - if (numKeys == 0) { - uw_Sqlcache_Entry *entry = cache->table; - if (entry) { - uw_Sqlcache_freeValue(entry->value); - entry->value = NULL; - } - } else { - char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); - char *buf = key; - time_t timeNow = uw_Sqlcache_getTimeNow(cache); - uw_Sqlcache_Entry *entry = NULL; - while (numKeys-- > 0) { - char *k = keys[numKeys]; - if (!k) { - if (entry) { - entry->timeInvalid = timeNow; - } else { - // Haven't found an entry yet, so the first key was null. - cache->timeInvalid = timeNow; - } - free(key); - pthread_rwlock_unlock(&cache->lockIn); - return; - } - buf = uw_Sqlcache_keyCopy(buf, k); - size_t len = buf - key; - entry = uw_Sqlcache_find(cache, key, len, 0); - if (!entry) { - // Nothing in the cache to flush. - free(key); - pthread_rwlock_unlock(&cache->lockIn); - return; - } - } - free(key); - // All the keys were non-null and the relevant entry is present, so we delete it. - uw_Sqlcache_delete(cache, entry); - } - pthread_rwlock_unlock(&cache->lockIn); } static void uw_Sqlcache_commit(void *data) { @@ -4853,6 +4814,45 @@ void uw_Sqlcache_store(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys, uw } void uw_Sqlcache_flush(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys) { - // A flush is represented in the queue as storing NULL. - uw_Sqlcache_store(ctx, cache, keys, NULL); + // A flush has to happen immediately so that subsequent stores in the same transaction fail. + // This is safe to do because we will always call [uw_Sqlcache_wlock] earlier. + // If the transaction fails, the only harm done is a few extra cache misses. + pthread_rwlock_wrlock(&cache->lockIn); + size_t numKeys = cache->numKeys; + if (numKeys == 0) { + uw_Sqlcache_Entry *entry = cache->table; + if (entry) { + uw_Sqlcache_freeValue(entry->value); + entry->value = NULL; + } + } else { + char *key = uw_Sqlcache_allocKeyBuffer(keys, numKeys); + char *buf = key; + time_t timeNow = uw_Sqlcache_getTimeNow(cache); + while (numKeys-- > 0) { + char *k = keys[numKeys]; + if (!k) { + size_t len = buf - key; + if (len == 0) { + // The first key was null. + cache->timeInvalid = timeNow; + } else { + uw_Sqlcache_Entry *entry = uw_Sqlcache_find(cache, key, len, 0); + if (entry) { + entry->timeInvalid = timeNow; + } + } + free(key); + pthread_rwlock_unlock(&cache->lockIn); + return; + } + buf = uw_Sqlcache_keyCopy(buf, k); + } + // All the keys were non-null, so we delete the pointed-to entry. 
+ size_t len = buf - key; + uw_Sqlcache_Entry *entry = uw_Sqlcache_find(cache, key, len, 0); + free(key); + uw_Sqlcache_delete(cache, entry); + } + pthread_rwlock_unlock(&cache->lockIn); } diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 481acbeb..a8ef647b 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -1,4 +1,4 @@ -structure Sqlcache (* DEBUG :> SQLCACHE *) = struct +structure Sqlcache :> SQLCACHE = struct (*********************) -- cgit v1.2.3 From bfcd84434ee997b474935aa13ae7bc1f3801d795 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 19 Nov 2015 01:59:00 -0500 Subject: Support nested queries but disable UrFlow for now. --- caching-tests/test.ur | 71 ++++++----- caching-tests/test.urp | 4 +- caching-tests/test.urs | 7 +- src/compiler.sig | 4 +- src/compiler.sml | 4 +- src/sources | 3 - src/sql.sig | 13 ++- src/sql.sml | 86 +++++++++----- src/sqlcache.sml | 310 ++++++++++++++++++++++++++++++++++--------------- 9 files changed, 332 insertions(+), 170 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index cbfde556..ea64bb2d 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -1,9 +1,7 @@ table tab : {Id : int, Val : int, Foo : int} PRIMARY KEY Id fun cache id = - res <- oneOrNoRows (SELECT tab.Val - FROM tab - WHERE tab.Id = {[id]}); + res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[id]}); return cache {case res of @@ -11,21 +9,32 @@ fun cache id = | Some row => {[row.Tab.Val]}} -fun sillyRecursive {Id = id : int, FooBar = fooBar} = - if fooBar <= 0 - then 0 - else 1 + sillyRecursive {Id = id, FooBar = fooBar - 1} +(* fun cacheAlt id = *) +(* res <- oneOrNoRows (SELECT Q.Id *) +(* FROM (SELECT Tab.Id AS Id FROM tab WHERE Tab.Id = {[id]}) *) +(* AS Q); *) +(* return *) +(* cacheAlt *) +(* {case res of *) +(* None => ? *) +(* | Some row => {[row.Q.Id]}} *) +(* *) -fun cacheR (r : {Id : int, FooBar : int}) = - res <- oneOrNoRows (SELECT tab.Val - FROM tab - WHERE tab.Id = {[r.Id]}); - return - cacheR {[r.FooBar]} - {case res of - None => ? - | Some row => {[row.Tab.Val]}} - +(* fun sillyRecursive {Id = id : int, FooBar = fooBar} = *) +(* if fooBar <= 0 *) +(* then 0 *) +(* else 1 + sillyRecursive {Id = id, FooBar = fooBar - 1} *) + +(* fun cacheR (r : {Id : int, FooBar : int}) = *) +(* res <- oneOrNoRows (SELECT tab.Val *) +(* FROM tab *) +(* WHERE tab.Id = {[r.Id]}); *) +(* return *) +(* cacheR {[r.FooBar]} *) +(* {case res of *) +(* None => ? *) +(* | Some row => {[row.Tab.Val]}} *) +(* *) (* fun cache2 id v = *) (* res <- oneOrNoRows (SELECT tab.Val *) @@ -60,21 +69,21 @@ fun flush id = Changed {[id]}! -fun flash id = - dml (UPDATE tab - SET Foo = Val - WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); - return - Maybe changed {[id]}? - +(* fun flash id = *) +(* dml (UPDATE tab *) +(* SET Foo = Val *) +(* WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); *) +(* return *) +(* Maybe changed {[id]}? *) +(* *) -fun floosh id = - dml (UPDATE tab - SET Id = {[id + 1]} - WHERE Id = {[id]}); - return - Shifted {[id]}! - +(* fun floosh id = *) +(* dml (UPDATE tab *) +(* SET Id = {[id + 1]} *) +(* WHERE Id = {[id]} OR Id = {[id - 1]} OR Id = {[id + 1]}); *) +(* return *) +(* Shifted {[id]}! 
*) +(* *) (* val flush17 = *) (* dml (UPDATE tab *) diff --git a/caching-tests/test.urp b/caching-tests/test.urp index 07922e69..2cb9e711 100644 --- a/caching-tests/test.urp +++ b/caching-tests/test.urp @@ -1,8 +1,8 @@ database host=localhost sql test.sql safeGet Test/flush -safeGet Test/flash -safeGet Test/floosh +# safeGet Test/flash +# safeGet Test/floosh # safeGet Test/flush17 minHeap 4096 diff --git a/caching-tests/test.urs b/caching-tests/test.urs index 1fa5a9c2..d6e8dd2e 100644 --- a/caching-tests/test.urs +++ b/caching-tests/test.urs @@ -1,7 +1,8 @@ val cache : int -> transaction page -val cacheR : {Id : int, FooBar : int} -> transaction page +(* val cacheAlt : int -> transaction page *) +(* val cacheR : {Id : int, FooBar : int} -> transaction page *) (* val cache2 : int -> int -> transaction page *) val flush : int -> transaction page -val flash : int -> transaction page -val floosh : int -> transaction page +(* val flash : int -> transaction page *) +(* val floosh : int -> transaction page *) (* val flush17 : transaction page *) diff --git a/src/compiler.sig b/src/compiler.sig index c154240a..1ab0f7ae 100644 --- a/src/compiler.sig +++ b/src/compiler.sig @@ -114,7 +114,7 @@ signature COMPILER = sig val untangle : (Mono.file, Mono.file) phase val mono_reduce : (Mono.file, Mono.file) phase val mono_shake : (Mono.file, Mono.file) phase - val iflow : (Mono.file, Mono.file) phase + (* val iflow : (Mono.file, Mono.file) phase *) val namejs : (Mono.file, Mono.file) phase val scriptcheck : (Mono.file, Mono.file) phase val jscomp : (Mono.file, Mono.file) phase @@ -169,7 +169,7 @@ signature COMPILER = sig val toMono_reduce : (string, Mono.file) transform val toMono_shake : (string, Mono.file) transform val toMono_opt2 : (string, Mono.file) transform - val toIflow : (string, Mono.file) transform + (* val toIflow : (string, Mono.file) transform *) val toNamejs : (string, Mono.file) transform val toNamejs_untangle : (string, Mono.file) transform val toScriptcheck : (string, Mono.file) transform diff --git a/src/compiler.sml b/src/compiler.sml index 814c48d3..d91d02aa 100644 --- a/src/compiler.sml +++ b/src/compiler.sml @@ -1372,19 +1372,21 @@ val toMono_shake = transform mono_shake "mono_shake1" o toMono_reduce val toMono_opt2 = transform mono_opt "mono_opt2" o toMono_shake +(* val iflow = { func = (fn file => (if !doIflow then Iflow.check file else (); file)), print = MonoPrint.p_file MonoEnv.empty } val toIflow = transform iflow "iflow" o toMono_opt2 +*) val namejs = { func = NameJS.rewrite, print = MonoPrint.p_file MonoEnv.empty } -val toNamejs = transform namejs "namejs" o toIflow +val toNamejs = transform namejs "namejs" o toMono_opt2 val toNamejs_untangle = transform untangle "namejs_untangle" o toNamejs diff --git a/src/sources b/src/sources index 8bf80bc6..1436575d 100644 --- a/src/sources +++ b/src/sources @@ -207,9 +207,6 @@ $(SRC)/mono_shake.sml $(SRC)/fuse.sig $(SRC)/fuse.sml -$(SRC)/iflow.sig -$(SRC)/iflow.sml - $(SRC)/name_js.sig $(SRC)/name_js.sml diff --git a/src/sql.sig b/src/sql.sig index 5f5d1b23..317c157f 100644 --- a/src/sql.sig +++ b/src/sql.sig @@ -81,12 +81,15 @@ datatype sitem = SqField of string * string | SqExp of sqexp * string -type query1 = {Select : sitem list, - From : (string * string) list, - Where : sqexp option} +datatype jtype = Inner | Left | Right | Full -datatype query = - Query1 of query1 +datatype fitem = + Table of string * string (* table AS name *) + | Join of jtype * fitem * fitem * sqexp + | Nested of query * string (* query AS name *) + + and 
query = + Query1 of {Select : sitem list, From : fitem list, Where : sqexp option} | Union of query * query val query : query parser diff --git a/src/sql.sml b/src/sql.sml index 08315a16..16d4210c 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -382,48 +382,72 @@ val select = log "select" (wrap (follow (const "SELECT ") (list sitem)) (fn ((), ls) => ls)) -val fitem = wrap (follow uw_ident - (follow (const " AS ") - t_ident)) - (fn (t, ((), f)) => (t, f)) +datatype jtype = Inner | Left | Right | Full -val from = log "from" - (wrap (follow (const "FROM ") (list fitem)) - (fn ((), ls) => ls)) +val jtype = wrap (ws (follow (opt (altL [wrap (const "LEFT") (fn () => Left), + wrap (const "RIGHT") (fn () => Right), + wrap (const "FULL") (fn () => Full)])) + (const " JOIN "))) + (fn (SOME jt, ()) => jt | (NONE, ()) => Inner) -val wher = wrap (follow (ws (const "WHERE ")) sqexp) - (fn ((), ls) => ls) - -type query1 = {Select : sitem list, - From : (string * string) list, - Where : sqexp option} - -val query1 = log "query1" - (wrap (follow (follow select from) (opt wher)) - (fn ((fs, ts), wher) => {Select = fs, From = ts, Where = wher})) +datatype fitem = + Table of string * string (* table AS name *) + | Join of jtype * fitem * fitem * sqexp + | Nested of query * string (* query AS name *) -datatype query = - Query1 of query1 + and query = + Query1 of {Select : sitem list, From : fitem list, Where : sqexp option} | Union of query * query +val wher = wrap (follow (ws (const "WHERE ")) sqexp) + (fn ((), ls) => ls) + val orderby = log "orderby" (wrap (follow (ws (const "ORDER BY ")) (follow (list sqexp) (opt (ws (const "DESC"))))) ignore) -fun query chs = log "query" - (wrap - (follow - (alt (wrap (follow (const "((") - (follow query - (follow (const ") UNION (") - (follow query (const "))"))))) - (fn ((), (q1, ((), (q2, ())))) => Union (q1, q2))) - (wrap query1 Query1)) - (opt orderby)) - #1) - chs +fun fitem chs = altL [wrap (follow uw_ident + (follow (const " AS ") + t_ident)) + (fn (t, ((), f)) => Table (t, f)), + wrap (follow (const "(") + (follow fitem + (follow jtype + (follow fitem + (follow (const " ON ") + (follow sqexp + (const ")"))))))) + (fn ((), (fi1, (jt, (fi2, ((), (se, ())))))) => + Join (jt, fi1, fi2, se)), + wrap (follow (const "(") + (follow query + (follow (const ") AS ") t_ident))) + (fn ((), (q, ((), f))) => Nested (q, f))] + chs + +and query1 chs = log "query1" + (wrap (follow (follow select from) (opt wher)) + (fn ((fs, ts), wher) => {Select = fs, From = ts, Where = wher})) + chs + +and from chs = log "from" + (wrap (follow (const "FROM ") (list fitem)) + (fn ((), ls) => ls)) + chs + +and query chs = log "query" + (wrap (follow + (alt (wrap (follow (const "((") + (follow query + (follow (const ") UNION (") + (follow query (const "))"))))) + (fn ((), (q1, ((), (q2, ())))) => Union (q1, q2))) + (wrap query1 Query1)) + (opt orderby)) + #1) + chs datatype dml = Insert of string * (string * sqexp) list diff --git a/src/sqlcache.sml b/src/sqlcache.sml index a8ef647b..9ff7c61d 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -30,11 +30,18 @@ fun mapFst f (x, y) = (f x, y) (* Option monad. 
*) fun obind (x, f) = Option.mapPartial f x -fun oguard (b, x) = if b then x else NONE +fun oguard (b, x) = if b then x () else NONE fun omap f = fn SOME x => SOME (f x) | _ => NONE fun omap2 f = fn (SOME x, SOME y) => SOME (f (x,y)) | _ => NONE fun osequence ys = List.foldr (omap2 op::) (SOME []) ys +fun concatMap f xs = List.concat (map f xs) + +val rec cartesianProduct : 'a list list -> 'a list list = + fn [] => [[]] + | (xs :: xss) => concatMap (fn ys => concatMap (fn x => [x :: ys]) xs) + (cartesianProduct xss) + fun indexOf test = let fun f n = @@ -104,10 +111,12 @@ val doBind = val dummyLoc = ErrorMsg.dummySpan (* DEBUG *) -fun printExp msg exp = Print.preface ("SQLCACHE: " ^ msg ^ ":", MonoPrint.p_exp MonoEnv.empty exp) -fun printExp' msg exp' = printExp msg (exp', dummyLoc) -fun printTyp msg typ = Print.preface ("SQLCACHE: " ^ msg ^ ":", MonoPrint.p_typ MonoEnv.empty typ) -fun printTyp' msg typ' = printTyp msg (typ', dummyLoc) +fun printExp msg exp = + (Print.preface ("SQLCACHE: " ^ msg ^ ":", MonoPrint.p_exp MonoEnv.empty exp); exp) +fun printExp' msg exp' = (printExp msg (exp', dummyLoc); exp') +fun printTyp msg typ = + (Print.preface ("SQLCACHE: " ^ msg ^ ":", MonoPrint.p_typ MonoEnv.empty typ); typ) +fun printTyp' msg typ' = (printTyp msg (typ', dummyLoc); typ') fun obindDebug printer (x, f) = case x of NONE => NONE @@ -204,13 +213,6 @@ datatype 'atom formula' = val flipJt = fn Conj => Disj | Disj => Conj -fun concatMap f xs = List.concat (map f xs) - -val rec cartesianProduct : 'a list list -> 'a list list = - fn [] => [[]] - | (xs :: xss) => concatMap (fn ys => concatMap (fn x => [x :: ys]) xs) - (cartesianProduct xss) - (* Pushes all negation to the atoms.*) fun pushNegate (normalizeAtom : bool * 'atom -> 'atom) (negating : bool) = fn Atom x => Atom' (normalizeAtom (negating, x)) @@ -349,8 +351,12 @@ end structure AtomOptionKey = OptionKeyFn(AtomExpKey) val rec tablesOfQuery = - fn Sql.Query1 {From = tablePairs, ...} => SS.fromList (map #1 tablePairs) + fn Sql.Query1 {From = fitems, ...} => List.foldl SS.union SS.empty (map tableOfFitem fitems) | Sql.Union (q1, q2) => SS.union (tablesOfQuery q1, tablesOfQuery q2) +and tableOfFitem = + fn Sql.Table (t, _) => SS.singleton t + | Sql.Nested (q, _) => tablesOfQuery q + | Sql.Join (_, f1, f2, _) => SS.union (tableOfFitem f1, tableOfFitem f2) val tableOfDml = fn Sql.Insert (tab, _) => tab @@ -489,43 +495,60 @@ end = struct (* Need lift', etc. because we don't have rank-2 polymorphism. This should probably use a functor (an ML one, not Haskell) but works for now. 
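The shape of the workaround: traverseSqexp and the other traversals below are written once against a tuple of pure/lift/lift2-style operations and then instantiated both with option-valued operations (for rewriting) and with monoid operations (for folding); without rank-2 polymorphism each instantiation has to be threaded through positionally, which is why the tuple keeps growing. A much smaller stand-alone instance of the same trick, where tree, traverse, countLeaves, and rewriteLeaves are invented names:

datatype tree = Leaf of int | Node of tree * tree

fun traverse (lift, lift2) f =
    let
        fun go (Leaf n) = lift Leaf (f n)
          | go (Node (l, r)) = lift2 Node (go l, go r)
    in
        go
    end

(* Monoid instantiation: ignore the constructors and just combine results. *)
fun countLeaves p =
    traverse (fn _ => fn x => x, fn _ => op+)
             (fn n => if p n then 1 else 0)

(* Option instantiation: rebuild the tree, failing if any leaf rewrite fails. *)
fun rewriteLeaves f =
    traverse (fn c => Option.map c,
              fn c => fn (x, y) =>
                         (case (x, y) of
                              (SOME a, SOME b) => SOME (c (a, b))
                            | _ => NONE))
             f

(* countLeaves (fn n => n > 0) (Node (Leaf 1, Leaf (~2))) evaluates to 1, and
   rewriteLeaves (fn n => if n = 0 then NONE else SOME (n * 2))
                 (Node (Leaf 1, Leaf 2)) evaluates to SOME (Node (Leaf 2, Leaf 4)). *)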
*) - fun traverseSqexp (pure, _, lift, _, lift'', lift2, _) f = + fun traverseSqexp (pure, _, _, _, lift, lift', _, _, lift2, _, _, _, _, _) f = let val rec tr = fn Sql.SqNot se => lift Sql.SqNot (tr se) | Sql.Binop (r, se1, se2) => lift2 (fn (trse1, trse2) => Sql.Binop (r, trse1, trse2)) (tr se1, tr se2) | Sql.SqKnown se => lift Sql.SqKnown (tr se) - | Sql.Inj (e', loc) => lift'' (fn fe' => Sql.Inj (fe', loc)) (f e') + | Sql.Inj (e', loc) => lift' (fn fe' => Sql.Inj (fe', loc)) (f e') | Sql.SqFunc (s, se) => lift (fn trse => Sql.SqFunc (s, trse)) (tr se) | se => pure se in tr end - fun traverseQuery (ops as (_, pure', _, lift', _, _, lift2')) f = + fun traverseFitem (ops as (_, _, _, pure''', _, _, _, lift''', _, _, _, _, lift2'''', lift2''''')) f = + let + val rec tr = + fn Sql.Table t => pure''' (Sql.Table t) + | Sql.Join (jt, fi1, fi2, se) => + lift2'''' (fn ((trfi1, trfi2), trse) => Sql.Join (jt, trfi1, trfi2, trse)) + (lift2''''' id (tr fi1, tr fi2), traverseSqexp ops f se) + | Sql.Nested (q, s) => lift''' (fn trq => Sql.Nested (trq, s)) + (traverseQuery ops f q) + in + tr + end + + and traverseQuery (ops as (_, pure', pure'', _, _, _, lift'', _, _, lift2', lift2'', lift2''', _, _)) f = let - val rec mp = + val rec seqList = + fn [] => pure'' [] + | (x::xs) => lift2''' op:: (x, seqList xs) + val rec tr = fn Sql.Query1 q => - (case #Where q of - NONE => pure' (Sql.Query1 q) - | SOME se => - lift' (fn mpse => Sql.Query1 {Select = #Select q, - From = #From q, - Where = SOME mpse}) - (traverseSqexp ops f se)) - | Sql.Union (q1, q2) => lift2' Sql.Union (mp q1, mp q2) + (* TODO: make sure we don't need to traverse [#Select q]. *) + lift2' (fn (trfrom, trwher) => Sql.Query1 {Select = #Select q, + From = trfrom, + Where = trwher}) + (seqList (map (traverseFitem ops f) (#From q)), + case #Where q of + NONE => pure' NONE + | SOME se => lift'' SOME (traverseSqexp ops f se)) + | Sql.Union (q1, q2) => lift2'' Sql.Union (tr q1, tr q2) in - mp + tr end (* Include unused tuple elements in argument for convenience of using same argument as [traverseQuery]. 
*) - fun traverseIM (pure, _, _, _, _, lift2, _) f = + fun traverseIM (pure, _, _, _, _, _, _, _, _, lift2, _, _, _, _) f = IM.foldli (fn (k, v, acc) => lift2 (fn (acc, w) => IM.insert (acc, k, w)) (acc, f (k,v))) (pure IM.empty) - fun traverseSubst (ops as (_, pure', lift, _, _, _, lift2')) f = + fun traverseSubst (ops as (_, pure', _, _, lift, _, _, _, _, lift2', _, _, _, _)) f = let fun mp ((n, fields), sqlify) = lift (fn ((n', fields'), sqlify') => @@ -546,11 +569,14 @@ end = struct traverseIM ops (fn (_, v) => mp v) end - fun monoidOps plus zero = (fn _ => zero, fn _ => zero, - fn _ => fn x => x, fn _ => fn x => x, fn _ => fn x => x, - fn _ => plus, fn _ => plus) + fun monoidOps plus zero = + (fn _ => zero, fn _ => zero, fn _ => zero, fn _ => zero, + fn _ => fn x => x, fn _ => fn x => x, fn _ => fn x => x, fn _ => fn x => x, + fn _ => plus, fn _ => plus, fn _ => plus, fn _ => plus, fn _ => plus, fn _ => plus) - val optionOps = (SOME, SOME, omap, omap, omap, omap2, omap2) + val optionOps = (SOME, SOME, SOME, SOME, + omap, omap, omap, omap, + omap2, omap2, omap2, omap2, omap2, omap2) fun foldMapQuery plus zero = traverseQuery (monoidOps plus zero) val omapQuery = traverseQuery optionOps @@ -727,7 +753,7 @@ val rec sqexpToFormula = | Sql.Null => raise Fail "Sqlcache: sqexpToFormula (Null)" fun mapSqexpFields f = - fn Sql.Field (t, v) => f (t, v) + fn Sql.Field (t, v) => f (t, v) | Sql.SqNot e => Sql.SqNot (mapSqexpFields f e) | Sql.Binop (r, e1, e2) => Sql.Binop (r, mapSqexpFields f e1, mapSqexpFields f e2) | Sql.SqKnown e => Sql.SqKnown (mapSqexpFields f e) @@ -744,12 +770,102 @@ fun renameTables tablePairs = mapSqexpFields (fn (t, f) => Sql.Field (rename t, f)) end -fun queryToFormula marker = - fn Sql.Query1 {Select = sitems, From = tablePairs, Where = wher} => +structure FlattenQuery = struct + + datatype substitution = RenameTable of string | SubstituteExp of Sql.sqexp SM.map + + fun applySubst substTable = + let + fun substitute (table, field) = + case SM.find (substTable, table) of + NONE => Sql.Field (table, field) + | SOME (RenameTable realTable) => Sql.Field (realTable, field) + | SOME (SubstituteExp substField) => + case SM.find (substField, field) of + NONE => raise Fail "Sqlcache: applySubst" + | SOME se => se + in + mapSqexpFields substitute + end + + fun addToSubst (substTable, table, substField) = + SM.insert (substTable, + table, + case substField of + RenameTable _ => substField + | SubstituteExp subst => SubstituteExp (SM.map (applySubst substTable) subst)) + + fun newSubst (t, s) = addToSubst (SM.empty, t, s) + + datatype sitem' = Named of Sql.sqexp * string | Unnamed of Sql.sqexp + + type queryFlat = {Select : sitem' list, Where : Sql.sqexp} + + val sitemsToSubst = + List.foldl (fn (Named (se, s), acc) => SM.insert (acc, s, se) + | (Unnamed _, _) => raise Fail "Sqlcache: sitemsToSubst") + SM.empty + + fun unionSubst (s1, s2) = SM.unionWith (fn _ => raise Fail "Sqlcache: unionSubst") (s1, s2) + + fun sqlAnd (se1, se2) = Sql.Binop (Sql.RLop Sql.And, se1, se2) + + val rec flattenFitem : Sql.fitem -> (Sql.sqexp * substitution SM.map) list = + fn Sql.Table (real, alias) => [(Sql.SqTrue, newSubst (alias, RenameTable real))] + | Sql.Nested (q, s) => + let + val qfs = flattenQuery q + in + map (fn (qf, subst) => + (#Where qf, addToSubst (subst, s, SubstituteExp (sitemsToSubst (#Select qf))))) + qfs + end + | Sql.Join (jt, fi1, fi2, se) => + concatMap (fn ((wher1, subst1)) => + map (fn (wher2, subst2) => + (sqlAnd (wher1, wher2), + (* There should be no name conflicts... 
Ziv hopes? *) + unionSubst (subst1, subst2))) + (flattenFitem fi2)) + (flattenFitem fi1) + + and flattenQuery : Sql.query -> (queryFlat * substitution SM.map) list = + fn Sql.Query1 q => + let + val fifss = cartesianProduct (map flattenFitem (#From q)) + in + map (fn fifs => + let + val subst = List.foldl (fn ((_, subst), acc) => unionSubst (acc, subst)) + SM.empty + fifs + val wher = List.foldr (fn ((wher, _), acc) => sqlAnd (wher, acc)) + (case #Where q of + NONE => Sql.SqTrue + | SOME wher => wher) + fifs + in + (* ASK: do we actually need to pass the substitution through here? *) + (* We use the substitution later, but it's not clear we + need any of its currently present fields again. *) + ({Select = map (fn Sql.SqExp (se, s) => Named (applySubst subst se, s) + | Sql.SqField tf => + Unnamed (applySubst subst (Sql.Field tf))) + (#Select q), + Where = applySubst subst wher}, + subst) + end) + fifss + end + | Sql.Union (q1, q2) => (flattenQuery q1) @ (flattenQuery q2) + +end + +val flattenQuery = map #1 o FlattenQuery.flattenQuery + +fun queryFlatToFormula marker {Select = sitems, Where = wher} = let - val fWhere = case wher of - NONE => Combo (Conj, []) - | SOME e => sqexpToFormula (renameTables tablePairs e) + val fWhere = sqexpToFormula wher in case marker of NONE => fWhere @@ -757,10 +873,10 @@ fun queryToFormula marker = let val fWhereMarked = mapFormulaExps markFields fWhere val toSqexp = - fn Sql.SqField tf => Sql.Field tf - | Sql.SqExp (se, _) => se + fn FlattenQuery.Named (se, _) => se + | FlattenQuery.Unnamed se => se fun ineq se = Atom (Sql.Ne, se, markFields se) - val fIneqs = Combo (Disj, map (ineq o renameTables tablePairs o toSqexp) sitems) + val fIneqs = Combo (Disj, map (ineq o toSqexp) sitems) in (Combo (Conj, [fWhere, @@ -769,7 +885,8 @@ fun queryToFormula marker = Combo (Conj, [fWhereMarked, fIneqs])])])) end end - | Sql.Union (q1, q2) => Combo (Disj, [queryToFormula marker q1, queryToFormula marker q2]) + +fun queryToFormula marker q = Combo (Disj, map (queryFlatToFormula marker) (flattenQuery q)) fun valsToFormula (markLeft, markRight) (table, vals) = Combo (Conj, @@ -828,7 +945,7 @@ structure ConflictMaps = struct (* If we don't know one side of the comparision, not a contradiction. *) | _ => false in - not (List.exists contradiction atoms) <\oguard\> SOME (UF.classes uf) + not (List.exists contradiction atoms) <\oguard\> (fn _ => SOME (UF.classes uf)) end fun addToEqs (eqs, n, e) = @@ -906,10 +1023,11 @@ structure ConflictMaps = struct mapFormula (toAtomExps DmlRel) (* No eqs should have key conflicts because no variable is in two - equivalence classes, so the [#1] could be [#2]. *) + equivalence classes. *) val mergeEqs : (atomExp IntBinaryMap.map option list -> atomExp IntBinaryMap.map option) = - List.foldr (omap2 (IM.unionWith #1)) (SOME IM.empty) + List.foldr (omap2 (IM.unionWith (fn _ => raise Fail "Sqlcache: ConflictMaps.mergeEqs"))) + (SOME IM.empty) val simplify = map TS.listItems @@ -1008,12 +1126,16 @@ fun fileAllMapfoldB doExp file start = fun fileMap doExp file = #1 (fileAllMapfoldB (fn _ => fn e => fn _ => (doExp e, ())) file ()) (* TODO: make this a bit prettier.... *) +(* TODO: factour out identical subexpressions to the same variable.... 
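Stepping back to the FlattenQuery structure above: its job is to compile joins and nested sub-queries away into flat conjunctive queries, so that the formula machinery only ever sees one WHERE clause per disjunct. As an illustration, take the join query that caching-tests/test.ur switches to later in this series; the value below is built with this file's Sql constructors, injectedId stands for whatever Mono expression {[id]} injects, and the flattened result is described only up to the exact nesting of the And nodes and the SqTrue conjuncts contributed by the base tables.

(* SELECT A.Val FROM (tab AS A JOIN tab AS B ON A.Id = B.Id) WHERE B.Id = {[id]} *)
fun joined injectedId =
    Sql.Query1 {Select = [Sql.SqField ("A", "Val")],
                From = [Sql.Join (Sql.Inner,
                                  Sql.Table ("tab", "A"),
                                  Sql.Table ("tab", "B"),
                                  Sql.Binop (Sql.RCmp Sql.Eq,
                                             Sql.Field ("A", "Id"),
                                             Sql.Field ("B", "Id")))],
                Where = SOME (Sql.Binop (Sql.RCmp Sql.Eq,
                                         Sql.Field ("B", "Id"),
                                         Sql.Inj injectedId))}

(* Flattening yields a single queryFlat: the substitution renames both aliases
   to the underlying table, the ON clause is folded into the WHERE, and the
   selected field is resolved through the substitution.  Informally:
       Select: Field ("tab", "Val")
       Where:  Field ("tab", "Id") = Field ("tab", "Id")
               AND Field ("tab", "Id") = Inj injectedId
   which is the flat shape queryToFormula consumes. *)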
*) val simplifySql = let fun factorOutNontrivial text = let val loc = dummyLoc - fun strcat (e1, e2) = (EStrcat (e1, e2), loc) + val strcat = + fn (e1, (EPrim (Prim.String (Prim.Normal, "")), _)) => e1 + | ((EPrim (Prim.String (Prim.Normal, "")), _), e2) => e2 + | (e1, e2) => (EStrcat (e1, e2), loc) val chunks = Sql.chunkify text val (newText, newVariables) = (* Important that this is foldr (to oppose foldl below). *) @@ -1193,7 +1315,7 @@ fun shouldConsolidate args = end fun cacheExp (env, exp', invalInfo, state : state) = - case worthCaching exp' <\oguard\> typOfExp' env exp' of + case worthCaching exp' <\oguard\> (fn _ => typOfExp' env exp') of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => @@ -1202,26 +1324,28 @@ fun cacheExp (env, exp', invalInfo, state : state) = in shouldConsolidate args <\oguard\> - List.foldr (fn (arg, acc) => - acc - <\obind\> - (fn args' => - (case arg of - AsIs exp => SOME exp - | Urlify exp => - typOfExp env exp - <\obind\> - (fn typ => (MonoFooify.urlify env (exp, typ)))) - <\obind\> - (fn arg' => SOME (arg' :: args')))) - (SOME []) - args - <\obind\> - (fn args' => - cacheWrap (env, (exp', dummyLoc), typ, args', #index state) + (fn _ => + List.foldr (fn (arg, acc) => + acc + <\obind\> + (fn args' => + (case arg of + AsIs exp => SOME exp + | Urlify exp => + typOfExp env exp + <\obind\> + (fn typ => (MonoFooify.urlify env (exp, typ)))) + <\obind\> + (fn arg' => SOME (arg' :: args')))) + (SOME []) + args <\obind\> - (fn cachedExp => - SOME (cachedExp, InvalInfo.updateState (invalInfo, length args', state)))) + (fn args' => + cacheWrap (env, (exp', dummyLoc), typ, args', #index state) + <\obind\> + (fn cachedExp => + SOME (cachedExp, + InvalInfo.updateState (invalInfo, length args', state))))) end fun cacheQuery (effs, env, q) : subexp = @@ -1238,20 +1362,22 @@ fun cacheQuery (effs, env, q) : subexp = val {query = queryText, initial, body, ...} = q val attempt = (* Ziv misses Haskell's do notation.... 
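For readers decoding the chains that follow: x <\obind\> f is just infix application of obind to (x, f), and together with the thunked oguard defined near the top of this file the style is a poor man's Maybe-style do block. The guard-then-parse-then-build chain in cacheQuery below desugars to an ordinary if/case; desugared is an invented name for the spelled-out version.

fun desugared (safe : bool) (parsed : 'a option) (build : 'a -> 'b option) : 'b option =
    if not safe then NONE
    else case parsed of
             NONE => NONE
           | SOME q => build q

(* ... which is what
       safe <\oguard\> (fn _ => parsed <\obind\> build)
   spells with the combinators. *)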
*) - (safe 0 queryText andalso safe 0 initial andalso safe 2 body) + (safe 0 (printExp "attempt" queryText) andalso safe 0 initial andalso safe 2 body) <\oguard\> - Sql.parse Sql.query queryText - <\obind\> - (fn queryParsed => - let - val invalInfo = InvalInfo.singleton queryParsed - fun mkExp state = - case cacheExp (env, EQuery q, invalInfo, state) of - NONE => ((EQuery q, dummyLoc), state) - | SOME (cachedExp, state) => ((cachedExp, dummyLoc), state) - in - SOME (Cachable (invalInfo, mkExp)) - end) + (fn _ => + Sql.parse Sql.query (printExp "safe" queryText) + <\obind\> + (fn queryParsed => + let + val _ = (printExp "parsed" queryText) + val invalInfo = InvalInfo.singleton queryParsed + fun mkExp state = + case cacheExp (env, EQuery q, invalInfo, state) of + NONE => ((EQuery q, dummyLoc), state) + | SOME (cachedExp, state) => ((cachedExp, dummyLoc), state) + in + SOME (Cachable (invalInfo, mkExp)) + end)) in case attempt of NONE => Impure (EQuery q, dummyLoc) @@ -1279,16 +1405,16 @@ fun cacheTree (effs : IS.set) ((env, exp as (exp', loc)), state) = InvalInfo.unbind (invalInfoOfSubexp subexp, unbinds)) (subexps, args))) <\obind\> - (fn invalInfo => - SOME (Cachable (invalInfo, - fn state => - case cacheExp (env, - f (map (#2 o #1) args), - invalInfo, - state) of - NONE => mkExp state - | SOME (e', state) => ((e', loc), state)), - state)) + (fn invalInfo => + SOME (Cachable (invalInfo, + fn state => + case cacheExp (env, + f (map (#2 o #1) args), + invalInfo, + state) of + NONE => mkExp state + | SOME (e', state) => ((e', loc), state)), + state)) in case attempt of SOME (subexp, state) => (subexp, state) @@ -1384,7 +1510,7 @@ structure Invalidations = struct DmlRel n => ERel n | Prim p => EPrim p (* TODO: make new type containing only these two. *) - | _ => raise Fail "Sqlcache: optionAtomExpToExp", + | _ => raise Fail "Sqlcache: Invalidations.optionAtomExpToExp", loc)), loc) @@ -1506,8 +1632,8 @@ fun addLocking file = ListMergeSort.sort (fn ((i, _), (j, _)) => i > j) ls end fun locksOfName n = - lockList {store = IIMM.findSet (#flush lockMap, n), - flush =IIMM.findSet (#store lockMap, n)} + lockList {flush = IIMM.findSet (#flush lockMap, n), + store = IIMM.findSet (#store lockMap, n)} val locksOfExp = lockList o locksNeeded lockMap val expts = exports file fun doVal (v as (x, n, t, exp, s)) = -- cgit v1.2.3 From 0c231060050adf556348b06f078c994f4a0e65b4 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 19 Nov 2015 03:45:39 -0500 Subject: Fix SQL parser JOIN bug and fix ON clause logic in Sqlcache. --- caching-tests/test.ur | 5 +++-- src/sql.sml | 17 ++++++++--------- src/sqlcache.sml | 14 ++++++++------ 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/caching-tests/test.ur b/caching-tests/test.ur index ea64bb2d..e0dab927 100644 --- a/caching-tests/test.ur +++ b/caching-tests/test.ur @@ -1,12 +1,13 @@ table tab : {Id : int, Val : int, Foo : int} PRIMARY KEY Id fun cache id = - res <- oneOrNoRows (SELECT tab.Val FROM tab WHERE tab.Id = {[id]}); + res <- oneOrNoRows (SELECT A.Val FROM (tab AS A JOIN tab AS B ON A.Id = B.Id) + WHERE B.Id = {[id]}); return cache {case res of None => ? 
- | Some row => {[row.Tab.Val]}} + | Some row => {[row.A.Val]}} (* fun cacheAlt id = *) diff --git a/src/sql.sml b/src/sql.sml index 16d4210c..dfe2f968 100644 --- a/src/sql.sml +++ b/src/sql.sml @@ -384,12 +384,6 @@ val select = log "select" datatype jtype = Inner | Left | Right | Full -val jtype = wrap (ws (follow (opt (altL [wrap (const "LEFT") (fn () => Left), - wrap (const "RIGHT") (fn () => Right), - wrap (const "FULL") (fn () => Full)])) - (const " JOIN "))) - (fn (SOME jt, ()) => jt | (NONE, ()) => Inner) - datatype fitem = Table of string * string (* table AS name *) | Join of jtype * fitem * fitem * sqexp @@ -404,17 +398,22 @@ val wher = wrap (follow (ws (const "WHERE ")) sqexp) val orderby = log "orderby" (wrap (follow (ws (const "ORDER BY ")) - (follow (list sqexp) - (opt (ws (const "DESC"))))) + (list (follow sqexp + (opt (ws (const "DESC")))))) ignore) +val jtype = altL [wrap (const "JOIN") (fn () => Inner), + wrap (const "LEFT JOIN") (fn () => Left), + wrap (const "RIGHT JOIN") (fn () => Right), + wrap (const "FULL JOIN") (fn () => Full)] + fun fitem chs = altL [wrap (follow uw_ident (follow (const " AS ") t_ident)) (fn (t, ((), f)) => Table (t, f)), wrap (follow (const "(") (follow fitem - (follow jtype + (follow (ws jtype) (follow fitem (follow (const " ON ") (follow sqexp diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 9ff7c61d..ce5ad5f5 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -823,9 +823,12 @@ structure FlattenQuery = struct | Sql.Join (jt, fi1, fi2, se) => concatMap (fn ((wher1, subst1)) => map (fn (wher2, subst2) => - (sqlAnd (wher1, wher2), - (* There should be no name conflicts... Ziv hopes? *) - unionSubst (subst1, subst2))) + let + val subst = unionSubst (subst1, subst2) + in + (* ON clause becomes part of the accumulated WHERE. *) + (sqlAnd (sqlAnd (wher1, wher2), applySubst subst se), subst) + end) (flattenFitem fi2)) (flattenFitem fi1) @@ -1362,14 +1365,13 @@ fun cacheQuery (effs, env, q) : subexp = val {query = queryText, initial, body, ...} = q val attempt = (* Ziv misses Haskell's do notation.... 
*) - (safe 0 (printExp "attempt" queryText) andalso safe 0 initial andalso safe 2 body) + (safe 0 queryText andalso safe 0 initial andalso safe 2 body) <\oguard\> (fn _ => - Sql.parse Sql.query (printExp "safe" queryText) + Sql.parse Sql.query queryText <\obind\> (fn queryParsed => let - val _ = (printExp "parsed" queryText) val invalInfo = InvalInfo.singleton queryParsed fun mkExp state = case cacheExp (env, EQuery q, invalInfo, state) of -- cgit v1.2.3 From 7a49a90f8b092e1c2e58d3e754578cff3bf06b18 Mon Sep 17 00:00:00 2001 From: Adam Chlipala Date: Thu, 19 Nov 2015 10:31:47 -0500 Subject: Fix a few C memory bugs --- src/c/urweb.c | 10 ++++++---- src/lru_cache.sml | 16 +++++++++------- 2 files changed, 15 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index c1cfe94c..945a6890 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -602,6 +602,8 @@ uw_context uw_init(int id, uw_loggers *lg) { ctx->remoteSock = -1; + ctx->cacheUnlock = NULL; + return ctx; } @@ -3681,7 +3683,7 @@ failure_kind uw_initialize(uw_context ctx) { if (r == 0) { uw_ensure_transaction(ctx); ctx->app->initializer(ctx); - if (ctx->app->db_commit(ctx)) + if (uw_commit(ctx)) uw_error(ctx, FATAL, "Error running SQL COMMIT"); } @@ -4626,7 +4628,7 @@ static char *uw_Sqlcache_allocKeyBuffer(char **keys, size_t numKeys) { while (numKeys-- > 0) { char* k = keys[numKeys]; if (!k) { - // Can only happen when flushihg, in which case we don't need anything past the null key. + // Can only happen when flushing, in which case we don't need anything past the null key. break; } // Leave room for separator. @@ -4695,7 +4697,7 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw if (numKeys == 0) { entry = cache->table; if (!entry) { - entry = malloc(sizeof(uw_Sqlcache_Entry)); + entry = calloc(1, sizeof(uw_Sqlcache_Entry)); entry->key = NULL; entry->value = NULL; entry->timeInvalid = 0; @@ -4709,7 +4711,7 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw size_t len = buf - key; entry = uw_Sqlcache_find(cache, key, len, 1); if (!entry) { - entry = malloc(sizeof(uw_Sqlcache_Entry)); + entry = calloc(1, sizeof(uw_Sqlcache_Entry)); entry->key = strdup(key); entry->value = NULL; entry->timeInvalid = 0; diff --git a/src/lru_cache.sml b/src/lru_cache.sml index e9ed5f73..5c05b261 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -111,16 +111,16 @@ fun setupQuery {index, params} = (* If the output is null, it means we had too much recursion, so it's a miss. 
*) string " if (v && v->output != NULL) {", newline, - (* string (" puts(\"SQLCACHE: hit " ^ i ^ ".\");"), *) - (* newline, *) + (*string (" puts(\"SQLCACHE: hit " ^ i ^ ".\");"), + newline,*) string " uw_write(ctx, v->output);", newline, string " return v->result;", newline, string " } else {", newline, - (* string (" puts(\"SQLCACHE: miss " ^ i ^ ".\");"), *) - (* newline, *) + (*string (" puts(\"SQLCACHE: miss " ^ i ^ ".\");"), + newline,*) string " uw_recordingStart(ctx);", newline, string " return NULL;", @@ -136,14 +136,16 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_Value *v = malloc(sizeof(uw_Sqlcache_Value));"), + string (" uw_Sqlcache_Value *v = calloc(1, sizeof(uw_Sqlcache_Value));"), newline, string " v->result = strdup(s);", newline, string " v->output = uw_recordingRead(ctx);", newline, - (* string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), *) - (* newline, *) + string " v->timeValid = 0;", + newline, + (*string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), + newline,*) string (" uw_Sqlcache_store(ctx, cache" ^ i ^ ", ks, v);"), newline, string " return uw_unit_v;", -- cgit v1.2.3 From 30dd885d1fc3013be0e3c2a45b2e0117f684f40a Mon Sep 17 00:00:00 2001 From: Adam Chlipala Date: Thu, 19 Nov 2015 13:18:58 -0500 Subject: Fix a read-after-free bug using a timestamp check --- src/c/urweb.c | 9 ++++++--- src/lru_cache.sml | 4 +--- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 945a6890..093a5294 100644 --- a/src/c/urweb.c +++ b/src/c/urweb.c @@ -4720,9 +4720,11 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw } free(key); } - uw_Sqlcache_freeValue(entry->value); - entry->value = value; - entry->value->timeValid = timeNow; + if (entry->value && entry->value->timeValid < value->timeValid) { + uw_Sqlcache_freeValue(entry->value); + entry->value = value; + entry->value->timeValid = timeNow; + } pthread_rwlock_unlock(&cache->lockIn); } @@ -4807,6 +4809,7 @@ void uw_Sqlcache_store(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys, uw update->keys = uw_Sqlcache_copyKeys(keys, cache->numKeys); update->value = value; update->next = NULL; + value->timeValid = uw_Sqlcache_getTimeNow(cache); if (ctx->cacheUpdateTail) { ctx->cacheUpdateTail->next = update; } else { diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 5c05b261..851b4ccb 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -136,14 +136,12 @@ fun setupQuery {index, params} = newline, string (" char *ks[] = {" ^ revArgs ^ "};"), newline, - string (" uw_Sqlcache_Value *v = calloc(1, sizeof(uw_Sqlcache_Value));"), + string (" uw_Sqlcache_Value *v = malloc(sizeof(uw_Sqlcache_Value));"), newline, string " v->result = strdup(s);", newline, string " v->output = uw_recordingRead(ctx);", newline, - string " v->timeValid = 0;", - newline, (*string (" puts(\"SQLCACHE: stored " ^ i ^ ".\");"), newline,*) string (" uw_Sqlcache_store(ctx, cache" ^ i ^ ", ks, v);"), -- cgit v1.2.3 From 027ffcf5b2e3f71a42857547b17b0824d38a3f85 Mon Sep 17 00:00:00 2001 From: Adam Chlipala Date: Thu, 19 Nov 2015 16:02:04 -0500 Subject: Fix condition for installing new cache entries --- src/c/urweb.c | 26 +++++++++++++++----------- src/lru_cache.sml | 10 +++++++++- tests/fib.ur | 10 ++++++++++ 3 files changed, 34 insertions(+), 12 deletions(-) create mode 100644 tests/fib.ur (limited to 'src') diff --git a/src/c/urweb.c b/src/c/urweb.c index 093a5294..54135666 100644 --- 
a/src/c/urweb.c +++ b/src/c/urweb.c @@ -504,8 +504,8 @@ struct uw_context { size_t output_buffer_size; // Sqlcache. - int numRecording; - int recordingOffset; + int numRecording, recordingCapacity; + int *recordingOffsets; uw_Sqlcache_Update *cacheUpdate; uw_Sqlcache_Update *cacheUpdateTail; uw_Sqlcache_Unlock *cacheUnlock; @@ -596,7 +596,8 @@ uw_context uw_init(int id, uw_loggers *lg) { ctx->output_buffer_size = 1; ctx->numRecording = 0; - ctx->recordingOffset = 0; + ctx->recordingCapacity = 0; + ctx->recordingOffsets = malloc(0); ctx->cacheUpdate = NULL; ctx->cacheUpdateTail = NULL; @@ -669,6 +670,8 @@ void uw_free(uw_context ctx) { free(ctx->output_buffer); + free(ctx->recordingOffsets); + free(ctx); } @@ -692,6 +695,7 @@ void uw_reset_keep_error_message(uw_context ctx) { ctx->usedSig = 0; ctx->needsResig = 0; ctx->remoteSock = -1; + ctx->numRecording = 0; } void uw_reset_keep_request(uw_context ctx) { @@ -1739,17 +1743,16 @@ void uw_write(uw_context ctx, const char* s) { } void uw_recordingStart(uw_context ctx) { - if (ctx->numRecording++ == 0) { - ctx->recordingOffset = ctx->page.front - ctx->page.start; + if (ctx->numRecording == ctx->recordingCapacity) { + ++ctx->recordingCapacity; + ctx->recordingOffsets = realloc(ctx->recordingOffsets, sizeof(int) * ctx->recordingCapacity); } + ctx->recordingOffsets[ctx->numRecording] = ctx->page.front - ctx->page.start; + ++ctx->numRecording; } char *uw_recordingRead(uw_context ctx) { - // Only the outermost recorder can read unless the recording is empty. - char *recording = ctx->page.start + ctx->recordingOffset; - if (--ctx->numRecording > 0 && recording != ctx->page.front) { - return NULL; - } + char *recording = ctx->page.start + ctx->recordingOffsets[--ctx->numRecording]; return strdup(recording); } @@ -4709,6 +4712,7 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw while (numKeys-- > 0) { buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]); size_t len = buf - key; + entry = uw_Sqlcache_find(cache, key, len, 1); if (!entry) { entry = calloc(1, sizeof(uw_Sqlcache_Entry)); @@ -4720,7 +4724,7 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw } free(key); } - if (entry->value && entry->value->timeValid < value->timeValid) { + if (!entry->value || entry->value->timeValid < value->timeValid) { uw_Sqlcache_freeValue(entry->value); entry->value = value; entry->value->timeValid = timeNow; diff --git a/src/lru_cache.sml b/src/lru_cache.sml index 851b4ccb..81000458 100644 --- a/src/lru_cache.sml +++ b/src/lru_cache.sml @@ -65,6 +65,7 @@ fun setupQuery {index, params} = val revArgs = paramRepeatRev (fn p => "p" ^ p) ", " + val argNums = List.tabulate (params, fn i => "p" ^ Int.toString i) in Print.box [string ("static uw_Sqlcache_Cache cacheStruct" ^ i ^ " = {"), @@ -119,7 +120,12 @@ fun setupQuery {index, params} = newline, string " } else {", newline, - (*string (" puts(\"SQLCACHE: miss " ^ i ^ ".\");"), + (*string (" printf(\"SQLCACHE: miss " ^ i ^ " " ^ String.concatWith ", " (List.tabulate (params, fn _ => "%s")) ^ ".\\n\""), + (case argNums of + [] => Print.box [] + | _ => Print.box [string ", ", + p_list string argNums]), + string ");", newline,*) string " uw_recordingStart(ctx);", newline, @@ -159,6 +165,8 @@ fun setupQuery {index, params} = newline, string (" uw_Sqlcache_flush(ctx, cache" ^ i ^ ", ks);"), newline, + (*string (" puts(\"SQLCACHE: flushed " ^ i ^ ".\");"), + newline,*) string " return uw_unit_v;", newline, string "}", diff --git a/tests/fib.ur b/tests/fib.ur new file 
mode 100644 index 00000000..9d7fd340 --- /dev/null +++ b/tests/fib.ur @@ -0,0 +1,10 @@ +fun fib n = + if n = 0 then + 0 + else if n = 1 then + 1 + else + fib (n - 1) + fib (n - 2) + +fun main n : transaction page = + return {[fib n]} -- cgit v1.2.3 From 94b1dbce1ae20ded6b2e8cc519f56ac9e3b39b24 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 19 Nov 2015 17:29:47 -0500 Subject: Add consolidation heuristic options. --- src/main.mlton.sml | 3 ++ src/settings.sig | 2 ++ src/settings.sml | 4 +++ src/sqlcache.sml | 95 +++++++++++++++++++++++++++++------------------------- 4 files changed, 60 insertions(+), 44 deletions(-) (limited to 'src') diff --git a/src/main.mlton.sml b/src/main.mlton.sml index 3ae968b0..d3d88af9 100644 --- a/src/main.mlton.sml +++ b/src/main.mlton.sml @@ -162,6 +162,9 @@ fun oneRun args = | "-sqlcache" :: rest => (Settings.setSqlcache true; doArgs rest) + | "-heuristic" :: h :: rest => + (Settings.setSqlcacheHeuristic h; + doArgs rest) | "-moduleOf" :: fname :: _ => (print (Compiler.moduleOf fname ^ "\n"); raise Code OS.Process.success) diff --git a/src/settings.sig b/src/settings.sig index e94832e0..d4bb4b08 100644 --- a/src/settings.sig +++ b/src/settings.sig @@ -281,6 +281,8 @@ signature SETTINGS = sig val setSqlcache : bool -> unit val getSqlcache : unit -> bool + val setSqlcacheHeuristic : string -> unit + val getSqlcacheHeuristic : unit -> string val setFilePath : string -> unit (* Sets the directory where we look for files being added below. *) diff --git a/src/settings.sml b/src/settings.sml index f9125c64..073e7883 100644 --- a/src/settings.sml +++ b/src/settings.sml @@ -811,6 +811,10 @@ val sqlcache = ref false fun setSqlcache b = sqlcache := b fun getSqlcache () = !sqlcache +val sqlcacheHeuristic = ref "always" +fun setSqlcacheHeuristic h = sqlcacheHeuristic := h +fun getSqlcacheHeuristic () = !sqlcacheHeuristic + structure SM = BinaryMapFn(struct type ord_key = string val compare = String.compare diff --git a/src/sqlcache.sml b/src/sqlcache.sml index ce5ad5f5..312ee217 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -93,9 +93,17 @@ val cacheRef = ref LruCache.cache fun setCache c = cacheRef := c fun getCache () = !cacheRef -val alwaysConsolidateRef = ref true -fun setAlwaysConsolidate b = alwaysConsolidateRef := b -fun getAlwaysConsolidate () = !alwaysConsolidateRef +datatype heuristic = Always | Never | NoPureAll | NoPureOne | NoCombo + +val heuristicRef = ref Always +fun setHeuristic h = heuristicRef := (case h of + "always" => Always + | "never" => Never + | "nopureall" => NoPureAll + | "nopureone" => NoPureOne + | "nocombo" => NoCombo + | _ => raise Fail "Sqlcache: setHeuristic") +fun getHeuristic () = !heuristicRef (************************) @@ -463,7 +471,7 @@ structure InvalInfo :> sig val empty : t val singleton : Sql.query -> t val query : t -> Sql.query - val orderArgs : t * Mono.exp -> cacheArg list + val orderArgs : t * Mono.exp -> cacheArg list option val unbind : t * unbind -> t option val union : t * t -> t val updateState : t * int * state -> state @@ -635,11 +643,20 @@ end = struct val argsMap = sqlArgsMap qs val args = map (expOfArg o #1) (AM.listItemsi argsMap) val invalPaths = List.foldl PS.union PS.empty (map freePaths args) + (* TODO: make sure these variables are okay to remove from the argument list. 
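Context for that TODO: pureArgs below are the free paths of the cached expression that never appear in the SQL, so they can only ever be part of the lookup key, never invalidation keys; that is why orderArgs places the SQL arguments first and the urlified pure arguments last, and why several of the heuristics introduced in this patch key off whether any pure arguments remain. A toy version of the ordering decision, where orderArgsSketch and its field names are invented:

(* Split a cached expression's free variables into SQL arguments, which a flush
   can name, and pure arguments, which merely distinguish cache entries. *)
fun orderArgsSketch {freeVars : int list, sqlVars : int list} =
    let
        val isSql = fn v => List.exists (fn v' => v' = v) sqlVars
        val pure = List.filter (not o isSql) freeVars
    in
        {keyOrder = sqlVars @ pure, pureArgs = pure}
    end

(* orderArgsSketch {freeVars = [0, 1, 2], sqlVars = [0, 2]}
     evaluates to {keyOrder = [0, 2, 1], pureArgs = [1]}.
   The intent is that a flush names only the leading SQL keys and thereby
   invalidates the entries for every value of the trailing pure argument. *)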
*) + val pureArgs = PS.difference (paths, invalPaths) + val shouldCache = + case getHeuristic () of + Always => true + | Never => (case qs of [_] => true | _ => false) + | NoPureAll => (case qs of [] => false | _ => true) + | NoPureOne => (case qs of [] => false | _ => PS.numItems pureArgs = 0) + | NoCombo => PS.numItems pureArgs = 0 orelse AM.numItems argsMap = 0 in (* Put arguments we might invalidate by first. *) - map AsIs args - (* TODO: make sure these variables are okay to remove from the argument list. *) - @ map (Urlify o expOfPath) (PS.listItems (PS.difference (paths, invalPaths))) + if shouldCache + then SOME (map AsIs args @ map (Urlify o expOfPath) (PS.listItems pureArgs)) + else NONE end (* As a kludge, we rename the variables in the query to correspond to the @@ -1309,47 +1326,35 @@ val worthCaching = fn EQuery _ => true | exp' => expSize (exp', dummyLoc) > sizeWorthCaching -fun shouldConsolidate args = - let - val isAsIs = fn AsIs _ => true | Urlify _ => false - in - getAlwaysConsolidate () - orelse not (List.exists isAsIs args andalso List.exists (not o isAsIs) args) - end - fun cacheExp (env, exp', invalInfo, state : state) = case worthCaching exp' <\oguard\> (fn _ => typOfExp' env exp') of NONE => NONE | SOME (TFun _, _) => NONE | SOME typ => - let - val args = InvalInfo.orderArgs (invalInfo, (exp', dummyLoc)) - in - shouldConsolidate args - <\oguard\> - (fn _ => - List.foldr (fn (arg, acc) => - acc - <\obind\> - (fn args' => - (case arg of - AsIs exp => SOME exp - | Urlify exp => - typOfExp env exp - <\obind\> - (fn typ => (MonoFooify.urlify env (exp, typ)))) - <\obind\> - (fn arg' => SOME (arg' :: args')))) - (SOME []) - args - <\obind\> - (fn args' => - cacheWrap (env, (exp', dummyLoc), typ, args', #index state) - <\obind\> - (fn cachedExp => - SOME (cachedExp, - InvalInfo.updateState (invalInfo, length args', state))))) - end + InvalInfo.orderArgs (invalInfo, (exp', dummyLoc)) + <\obind\> + (fn args => + List.foldr (fn (arg, acc) => + acc + <\obind\> + (fn args' => + (case arg of + AsIs exp => SOME exp + | Urlify exp => + typOfExp env exp + <\obind\> + (fn typ => (MonoFooify.urlify env (exp, typ)))) + <\obind\> + (fn arg' => SOME (arg' :: args')))) + (SOME []) + args + <\obind\> + (fn args' => + cacheWrap (env, (exp', dummyLoc), typ, args', #index state) + <\obind\> + (fn cachedExp => + SOME (cachedExp, + InvalInfo.updateState (invalInfo, length args', state))))) fun cacheQuery (effs, env, q) : subexp = let @@ -1684,7 +1689,9 @@ val go' = addLocking o addFlushing o addCaching o simplifySql o inlineSql fun go file = let (* TODO: do something nicer than [Sql] being in one of two modes. *) - val () = (resetFfiInfo (); Sql.sqlcacheMode := true) + val () = (resetFfiInfo (); + Sql.sqlcacheMode := true; + setHeuristic (Settings.getSqlcacheHeuristic ())) val file = go' file (* Important that this happens after [MonoFooify.urlify] calls! *) val fmDecls = MonoFooify.getNewFmDecls () -- cgit v1.2.3 From ff19a9c86b380918f50e294848be06f29b2ba1dd Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Thu, 19 Nov 2015 18:13:01 -0500 Subject: More work on heuristics. 
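The substance of the patch is the revised Smart heuristic: a cache is consolidated around a group of queries only when no pure arguments are left over and each query's set of SQL arguments either contains, or is contained in, the union of the sets seen before it, so the argument sets essentially line up in a chain. A self-contained sketch of that test over integer sets; chainable is an invented name, and the real code in the hunk below works over sets of Mono expressions.

structure ISet = BinarySetFn(struct type ord_key = int val compare = Int.compare end)

fun chainable (sets : ISet.set list) : bool =
    let
        fun test (s, acc) =
            case acc of
                NONE => NONE
              | SOME s' =>
                let
                    val both = ISet.union (s, s')
                in
                    if ISet.numItems s = ISet.numItems both
                       orelse ISet.numItems s' = ISet.numItems both
                    then SOME both
                    else NONE
                end
    in
        case sets of
            [] => false
          | s :: ss => isSome (List.foldl test (SOME s) ss)
    end

(* chainable [ISet.singleton 1, ISet.add (ISet.singleton 1, 2)] evaluates to true,
   while chainable [ISet.singleton 1, ISet.singleton 2] evaluates to false. *)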
--- src/sqlcache.sml | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/sqlcache.sml b/src/sqlcache.sml index 312ee217..b2c8504b 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -93,11 +93,13 @@ val cacheRef = ref LruCache.cache fun setCache c = cacheRef := c fun getCache () = !cacheRef -datatype heuristic = Always | Never | NoPureAll | NoPureOne | NoCombo +datatype heuristic = SmartEq (* | SmartSub *) | Always | Never | NoPureAll | NoPureOne | NoCombo val heuristicRef = ref Always fun setHeuristic h = heuristicRef := (case h of - "always" => Always + "smarteq" => SmartEq + (* | "smartsub" => SmartSub *) + | "always" => Always | "never" => Never | "nopureall" => NoPureAll | "nopureone" => NoPureOne @@ -613,13 +615,13 @@ end = struct val union = op@ + fun addToSqlArgsMap ((q, subst), acc) = + IM.foldl (fn (arg, acc) => AM.insert (acc, arg, ())) acc subst + fun sqlArgsMap (qs : t) = let val args = - List.foldl (fn ((q, subst), acc) => - IM.foldl (fn (arg, acc) => AM.insert (acc, arg, ())) acc subst) - AM.empty - qs + List.foldl addToSqlArgsMap AM.empty qs val countRef = ref (~1) fun count () = (countRef := !countRef + 1; !countRef) in @@ -647,8 +649,31 @@ end = struct val pureArgs = PS.difference (paths, invalPaths) val shouldCache = case getHeuristic () of - Always => true - | Never => (case qs of [_] => true | _ => false) + SmartEq => + (case (qs, PS.numItems pureArgs) of + ((q::qs), 0) => + let + val m = addToSqlArgsMap (q, AM.empty) + val ms = map (fn q => addToSqlArgsMap (q, AM.empty)) qs + fun test (m, acc) = + acc + <\obind\> + (fn m' => + let + val mm = AM.unionWith #1 (m, m') + in + AM.numItems m = AM.numItems mm + <\oguard\> + (fn _ => SOME mm) + end) + in + case List.foldl test (SOME m) ms of + NONE => false + | SOME _ => true + end + | _ => false) + | Always => true + | Never => (case qs of [_] => PS.numItems pureArgs = 0 | _ => false) | NoPureAll => (case qs of [] => false | _ => true) | NoPureOne => (case qs of [] => false | _ => PS.numItems pureArgs = 0) | NoCombo => PS.numItems pureArgs = 0 orelse AM.numItems argsMap = 0 -- cgit v1.2.3 From a0d66adaeceaa07e4006a0570211f7453a5b5738 Mon Sep 17 00:00:00 2001 From: Ziv Scully Date: Fri, 20 Nov 2015 03:26:21 -0500 Subject: Tweak cache consolidation and choose better default. --- src/main.mlton.sml | 2 +- src/settings.sig | 2 -- src/settings.sml | 4 ---- src/sqlcache.sig | 2 ++ src/sqlcache.sml | 40 +++++++++++++++++++++------------------- 5 files changed, 24 insertions(+), 26 deletions(-) (limited to 'src') diff --git a/src/main.mlton.sml b/src/main.mlton.sml index d3d88af9..164ddfbd 100644 --- a/src/main.mlton.sml +++ b/src/main.mlton.sml @@ -163,7 +163,7 @@ fun oneRun args = (Settings.setSqlcache true; doArgs rest) | "-heuristic" :: h :: rest => - (Settings.setSqlcacheHeuristic h; + (Sqlcache.setHeuristic h; doArgs rest) | "-moduleOf" :: fname :: _ => (print (Compiler.moduleOf fname ^ "\n"); diff --git a/src/settings.sig b/src/settings.sig index d4bb4b08..e94832e0 100644 --- a/src/settings.sig +++ b/src/settings.sig @@ -281,8 +281,6 @@ signature SETTINGS = sig val setSqlcache : bool -> unit val getSqlcache : unit -> bool - val setSqlcacheHeuristic : string -> unit - val getSqlcacheHeuristic : unit -> string val setFilePath : string -> unit (* Sets the directory where we look for files being added below. 
*) diff --git a/src/settings.sml b/src/settings.sml index 073e7883..f9125c64 100644 --- a/src/settings.sml +++ b/src/settings.sml @@ -811,10 +811,6 @@ val sqlcache = ref false fun setSqlcache b = sqlcache := b fun getSqlcache () = !sqlcache -val sqlcacheHeuristic = ref "always" -fun setSqlcacheHeuristic h = sqlcacheHeuristic := h -fun getSqlcacheHeuristic () = !sqlcacheHeuristic - structure SM = BinaryMapFn(struct type ord_key = string val compare = String.compare diff --git a/src/sqlcache.sig b/src/sqlcache.sig index fabc9ebf..e264c1f0 100644 --- a/src/sqlcache.sig +++ b/src/sqlcache.sig @@ -3,6 +3,8 @@ signature SQLCACHE = sig val setCache : Cache.cache -> unit val getCache : unit -> Cache.cache +val setHeuristic : string -> unit + val getFfiInfo : unit -> {index : int, params : int} list val go : Mono.file -> Mono.file diff --git a/src/sqlcache.sml b/src/sqlcache.sml index b2c8504b..75a17e48 100644 --- a/src/sqlcache.sml +++ b/src/sqlcache.sml @@ -93,12 +93,11 @@ val cacheRef = ref LruCache.cache fun setCache c = cacheRef := c fun getCache () = !cacheRef -datatype heuristic = SmartEq (* | SmartSub *) | Always | Never | NoPureAll | NoPureOne | NoCombo +datatype heuristic = Smart | Always | Never | NoPureAll | NoPureOne | NoCombo -val heuristicRef = ref Always +val heuristicRef = ref NoPureOne fun setHeuristic h = heuristicRef := (case h of - "smarteq" => SmartEq - (* | "smartsub" => SmartSub *) + "smart" => Smart | "always" => Always | "never" => Never | "nopureall" => NoPureAll @@ -498,6 +497,7 @@ end = struct structure I = SK structure J = SK structure K = struct type ord_key = Mono.typ val compare = MonoUtil.Typ.compare end))) + structure AS = BinarySetFn(AK) structure AM = BinaryMapFn(AK) (* Traversal Utilities *) @@ -615,13 +615,16 @@ end = struct val union = op@ - fun addToSqlArgsMap ((q, subst), acc) = - IM.foldl (fn (arg, acc) => AM.insert (acc, arg, ())) acc subst + fun sqlArgsSet (q, subst) = + IM.foldl AS.add' AS.empty subst fun sqlArgsMap (qs : t) = let val args = - List.foldl addToSqlArgsMap AM.empty qs + List.foldl (fn ((q, subst), acc) => + IM.foldl (fn (arg, acc) => AM.insert (acc, arg, ())) acc subst) + AM.empty + qs val countRef = ref (~1) fun count () = (countRef := !countRef + 1; !countRef) in @@ -649,25 +652,26 @@ end = struct val pureArgs = PS.difference (paths, invalPaths) val shouldCache = case getHeuristic () of - SmartEq => + Smart => (case (qs, PS.numItems pureArgs) of ((q::qs), 0) => let - val m = addToSqlArgsMap (q, AM.empty) - val ms = map (fn q => addToSqlArgsMap (q, AM.empty)) qs - fun test (m, acc) = + val args = sqlArgsSet q + val argss = map sqlArgsSet qs + fun test (args, acc) = acc <\obind\> - (fn m' => + (fn args' => let - val mm = AM.unionWith #1 (m, m') + val both = AS.union (args, args') in - AM.numItems m = AM.numItems mm + (AS.numItems args = AS.numItems both + orelse AS.numItems args' = AS.numItems both) <\oguard\> - (fn _ => SOME mm) + (fn _ => SOME both) end) in - case List.foldl test (SOME m) ms of + case List.foldl test (SOME args) argss of NONE => false | SOME _ => true end @@ -1714,9 +1718,7 @@ val go' = addLocking o addFlushing o addCaching o simplifySql o inlineSql fun go file = let (* TODO: do something nicer than [Sql] being in one of two modes. *) - val () = (resetFfiInfo (); - Sql.sqlcacheMode := true; - setHeuristic (Settings.getSqlcacheHeuristic ())) + val () = (resetFfiInfo (); Sql.sqlcacheMode := true) val file = go' file (* Important that this happens after [MonoFooify.urlify] calls! 
*)
         val fmDecls = MonoFooify.getNewFmDecls ()
--
cgit v1.2.3


From 081f815b457cdfe759b733a9adc18aab32127e45 Mon Sep 17 00:00:00 2001
From: Ziv Scully
Date: Fri, 20 Nov 2015 10:51:43 -0500
Subject: Tiny concurrency bugfix (race condition on cache->timeNow).

---
 src/c/urweb.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'src')

diff --git a/src/c/urweb.c b/src/c/urweb.c
index 54135666..12009f02 100644
--- a/src/c/urweb.c
+++ b/src/c/urweb.c
@@ -4617,7 +4617,8 @@ static void uw_Sqlcache_add(uw_Sqlcache_Cache *cache, uw_Sqlcache_Entry *entry,
 }
 
 static unsigned long uw_Sqlcache_getTimeNow(uw_Sqlcache_Cache *cache) {
-  return ++cache->timeNow;
+  // TODO: verify that this makes time comparisons do the Right Thing.
+  return cache->timeNow++;
 }
 
 static unsigned long uw_Sqlcache_timeMax(unsigned long x, unsigned long y) {
@@ -4689,7 +4690,7 @@ uw_Sqlcache_Value *uw_Sqlcache_check(uw_context ctx, uw_Sqlcache_Cache *cache, c
   // Returning outside the lock is safe because updates happen at commit time.
   // Those are the only times the returned value or its strings can get freed.
   // Handler output is a new string, so it's safe to free this at commit time.
-  return value && value->timeValid > timeInvalid ? value : NULL;
+  return value && timeInvalid < value->timeValid ? value : NULL;
 }
 
 static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw_Sqlcache_Value *value) {
@@ -4712,7 +4713,7 @@ static void uw_Sqlcache_storeCommitOne(uw_Sqlcache_Cache *cache, char **keys, uw
     while (numKeys-- > 0) {
       buf = uw_Sqlcache_keyCopy(buf, keys[numKeys]);
       size_t len = buf - key;
-      
+
       entry = uw_Sqlcache_find(cache, key, len, 1);
       if (!entry) {
         entry = calloc(1, sizeof(uw_Sqlcache_Entry));
@@ -4813,7 +4814,8 @@ void uw_Sqlcache_store(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys, uw
   update->keys = uw_Sqlcache_copyKeys(keys, cache->numKeys);
   update->value = value;
   update->next = NULL;
-  value->timeValid = uw_Sqlcache_getTimeNow(cache);
+  // Can't use [uw_Sqlcache_getTimeNow] because it modifies state and we don't have the lock.
+  value->timeValid = cache->timeNow;
   if (ctx->cacheUpdateTail) {
     ctx->cacheUpdateTail->next = update;
   } else {
--
cgit v1.2.3


From 0271786bacdf9c12a142367a479b24ba111ebd17 Mon Sep 17 00:00:00 2001
From: Ziv Scully
Date: Fri, 20 Nov 2015 11:51:14 -0500
Subject: Add read locks around time read in store.

---
 src/c/urweb.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'src')

diff --git a/src/c/urweb.c b/src/c/urweb.c
index 12009f02..a6639ef2 100644
--- a/src/c/urweb.c
+++ b/src/c/urweb.c
@@ -4815,7 +4815,9 @@ void uw_Sqlcache_store(uw_context ctx, uw_Sqlcache_Cache *cache, char **keys, uw
   update->value = value;
   update->next = NULL;
   // Can't use [uw_Sqlcache_getTimeNow] because it modifies state and we don't have the lock.
+  pthread_rwlock_rdlock(&cache->lockIn);
   value->timeValid = cache->timeNow;
+  pthread_rwlock_unlock(&cache->lockIn);
   if (ctx->cacheUpdateTail) {
     ctx->cacheUpdateTail->next = update;
   } else {
--
cgit v1.2.3
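
The two commits above address the same hazard: uw_Sqlcache_store used to obtain its timestamp by calling uw_Sqlcache_getTimeNow, which increments cache->timeNow as a side effect, while holding no lock. The first commit replaces that call with a plain read of the counter; the second wraps the read in cache->lockIn taken as a read lock. The snippet below is a standalone sketch of that read-lock/write-lock split, not code from the patches: the names (logical_clock, clock_tick, clock_peek) are invented for the example, and it assumes only POSIX pthread_rwlock_*.

/* Illustrative only: these names do not exist in src/c/urweb.c. */
#include <pthread.h>
#include <stdio.h>

struct logical_clock {
  pthread_rwlock_t lock;   /* stands in for cache->lockIn */
  unsigned long timeNow;   /* stands in for cache->timeNow */
};

static struct logical_clock clk = { PTHREAD_RWLOCK_INITIALIZER, 0 };

/* Advancing the clock mutates timeNow, so the sketch takes the write lock here. */
static unsigned long clock_tick(struct logical_clock *c) {
  pthread_rwlock_wrlock(&c->lock);
  unsigned long t = c->timeNow++;
  pthread_rwlock_unlock(&c->lock);
  return t;
}

/* Merely observing the clock needs only the read lock, the pattern the second
   commit introduces around `value->timeValid = cache->timeNow`. */
static unsigned long clock_peek(struct logical_clock *c) {
  pthread_rwlock_rdlock(&c->lock);
  unsigned long t = c->timeNow;
  pthread_rwlock_unlock(&c->lock);
  return t;
}

int main(void) {
  clock_tick(&clk);                              /* timeNow: 0 -> 1 */
  clock_tick(&clk);                              /* timeNow: 1 -> 2 */
  printf("time now: %lu\n", clock_peek(&clk));   /* prints "time now: 2" */
  return 0;
}

In the sketch, observers only ever take the read lock because they never modify the counter, while any path that advances the counter takes the write lock; that appears to be the division of labor the last two commits settle on for cache->timeNow.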