author     Adam Chlipala <adam@chlipala.net>  2018-06-03 15:01:24 -0400
committer  Adam Chlipala <adam@chlipala.net>  2018-06-03 15:01:24 -0400
commit     f3373fd5809689bece7fd390f2d737aa0b43f594 (patch)
tree       739be9eaa093fbf25bc69317811b869775c1ba80 /src
parent     1c493e9ec47f4754dd7237078e8c4f3300925ce3 (diff)
'filecache' .urp directive, fixing a longstanding MonoUtil bug in the process
Diffstat (limited to 'src')
-rw-r--r--  src/c/urweb.c      | 126
-rw-r--r--  src/cjr_print.sml  |  24
-rw-r--r--  src/compiler.sig   |   3
-rw-r--r--  src/compiler.sml   |  19
-rw-r--r--  src/demo.sml       |   1
-rw-r--r--  src/filecache.sig  |  35
-rw-r--r--  src/filecache.sml  | 230
-rw-r--r--  src/mono_util.sml  |   6
-rw-r--r--  src/mysql.sml      |   3
-rw-r--r--  src/postgres.sml   |   3
-rw-r--r--  src/settings.sig   |   6
-rw-r--r--  src/settings.sml   |  10
-rw-r--r--  src/sources        |   3
-rw-r--r--  src/sqlite.sml     |   3
14 files changed, 460 insertions, 12 deletions
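
The new directive takes a single argument, which the generated application treats as a directory where blob contents are cached under their SHA-512 hashes (it ends up in ctx->app->file_cache in the C changes below). A minimal .urp sketch of how the directive might be used; the project, database, and cache-path names are hypothetical:

    database dbname=gallery
    sql gallery.sql
    filecache /var/cache/gallery-blobs

    gallery

Of the three DBMS drivers touched by this commit, only Postgres advertises supportsSHA512, which suggests the directive is intended for Postgres backends.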
diff --git a/src/c/urweb.c b/src/c/urweb.c
index 283efcdd..e7efae38 100644
--- a/src/c/urweb.c
+++ b/src/c/urweb.c
@@ -13,8 +13,8 @@
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
-#include <openssl/des.h>
#include <openssl/rand.h>
+#include <openssl/sha.h>
#include <time.h>
#include <math.h>
@@ -514,6 +514,11 @@ struct uw_context {
uw_Sqlcache_Unlock *cacheUnlock;
int remoteSock;
+
+ int file_cache_missed;
+ // Set if we are recovering from a miss in the file cache in handling an SQL
+ // query that only returns hashes of files. If so, this time around we will
+ // run queries to return actual file contents instead.
};
size_t uw_headers_max = SIZE_MAX;
@@ -608,6 +613,8 @@ uw_context uw_init(int id, uw_loggers *lg) {
ctx->cacheUnlock = NULL;
+ ctx->file_cache_missed = 0;
+
return ctx;
}
@@ -3643,6 +3650,8 @@ int uw_commit(uw_context ctx) {
}
}
+ ctx->file_cache_missed = 0;
+
return 0;
}
@@ -5058,3 +5067,118 @@ int strcmp_nullsafe(const char *str1, const char *str2) {
else
return 1;
}
+
+static int is_valid_hash(uw_Basis_string hash) {
+ for (; *hash; ++hash)
+ if (!isxdigit(*hash))
+ return 0;
+
+ return 1;
+}
+
+uw_unit uw_Basis_cache_file(uw_context ctx, uw_Basis_blob contents) {
+ char *dir = ctx->app->file_cache, path[1024], tempfile[1024];
+ unsigned char *res, *hash;
+ char *hash_encoded;
+ int fd, len, i;
+ ssize_t written_so_far = 0;
+
+ if (!dir)
+ return uw_unit_v;
+
+ hash = uw_malloc(ctx, SHA512_DIGEST_LENGTH);
+ res = SHA512((unsigned char *)contents.data, contents.size, hash);
+ if (!res)
+ uw_error(ctx, FATAL, "Can't hash file contents");
+
+ hash_encoded = uw_malloc(ctx, SHA512_DIGEST_LENGTH * 2 + 1);
+ for (i = 0; i < SHA512_DIGEST_LENGTH; ++i)
+ sprintf(hash_encoded + 2 * i, "%02x", (int)hash[i]);
+ hash_encoded[SHA512_DIGEST_LENGTH * 2] = 0;
+
+ len = snprintf(tempfile, sizeof tempfile, "%s/tmpXXXXXX", dir);
+ if (len < 0 || len >= sizeof tempfile)
+ uw_error(ctx, FATAL, "Error assembling file path for cache (temporary)");
+
+ fd = mkstemp(tempfile);
+ if (fd < 0)
+ uw_error(ctx, FATAL, "Error creating temporary file for cache");
+
+ while (written_so_far < contents.size) {
+ ssize_t written_just_now = write(fd, contents.data + written_so_far, contents.size - written_so_far);
+ if (written_just_now <= 0) {
+ close(fd);
+ uw_error(ctx, FATAL, "Error writing all bytes to cached file");
+ }
+ written_so_far += written_just_now;
+ }
+
+ close(fd);
+
+ len = snprintf(path, sizeof path, "%s/%s", dir, hash_encoded);
+ if (len < 0 || len >= sizeof path)
+ uw_error(ctx, FATAL, "Error assembling file path for cache");
+
+ if (rename(tempfile, path))
+ uw_error(ctx, FATAL, "Error renaming temporary file into cache");
+
+ return uw_unit_v;
+}
+
+uw_Basis_blob uw_Basis_check_filecache(uw_context ctx, uw_Basis_string hash) {
+ char path[1024], *dir = ctx->app->file_cache, *filedata;
+ int len;
+ long size, read_so_far = 0;
+ FILE *fp;
+ uw_Basis_blob res;
+
+ // Hashes come formatted for printing by Postgres, which means they start with
+ // two extra characters. Let's remove them.
+ if (!hash[0] || !hash[1])
+ uw_error(ctx, FATAL, "Hash to check against file cache came in not in Postgres format: %s", hash);
+ hash += 2;
+
+ if (!dir)
+ uw_error(ctx, FATAL, "Checking file cache when no directory is set");
+
+ if (!is_valid_hash(hash))
+ uw_error(ctx, FATAL, "Checking file cache with invalid hash %s", hash);
+
+ len = snprintf(path, sizeof path, "%s/%s", dir, hash);
+ if (len < 0 || len >= sizeof path)
+ uw_error(ctx, FATAL, "Error assembling file path for cache");
+
+ fp = fopen(path, "r");
+ if (!fp) {
+ ctx->file_cache_missed = 1;
+ uw_error(ctx, UNLIMITED_RETRY, "Missed in the file cache for hash %s", hash);
+ }
+ uw_push_cleanup(ctx, (void (*)(void *))fclose, fp);
+
+ if (fseek(fp, 0L, SEEK_END))
+ uw_error(ctx, FATAL, "Error seeking to end of cached file");
+
+ size = ftell(fp);
+ if (size < 0)
+ uw_error(ctx, FATAL, "Error getting size of cached file");
+
+ rewind(fp);
+ filedata = uw_malloc(ctx, size);
+
+ while (read_so_far < size) {
+ size_t just_read = fread(filedata + read_so_far, 1, size - read_so_far, fp);
+ if (just_read <= 0)
+ uw_error(ctx, FATAL, "Error reading all bytes of cached file");
+ read_so_far += just_read;
+ }
+
+ uw_pop_cleanup(ctx);
+
+ res.size = size;
+ res.data = filedata;
+ return res;
+}
+
+uw_Basis_bool uw_Basis_filecache_missed(uw_context ctx) {
+ return !!(ctx->file_cache_missed);
+}
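
The three new entry points above implement a two-pass, retry-based protocol. On the first pass, a rewritten query returns only SHA-512 hashes, and each hash is resolved against the on-disk cache; a miss records itself in ctx->file_cache_missed and raises UNLIMITED_RETRY, rolling back and rerunning the transaction. On the second pass the original query runs, the real blobs come back over the SQL connection, and they are written into the cache for next time (uw_commit then clears the flag). A hedged C sketch of the per-column call pattern, assuming urweb.h is in scope; this helper does not exist in the commit, since the generated code inlines the equivalent logic:

    // Hypothetical illustration of the per-row decision for one blob column.
    static void fetch_blob_column(uw_context ctx,
                                  uw_Basis_string hash_from_sql,
                                  uw_Basis_blob blob_from_sql) {
      if (!uw_Basis_filecache_missed(ctx)) {
        // First pass: the rewritten query returned only a hash. A cache miss
        // sets ctx->file_cache_missed and raises UNLIMITED_RETRY, which
        // aborts the transaction and reruns the handler.
        uw_Basis_blob contents = uw_Basis_check_filecache(ctx, hash_from_sql);
        (void)contents; // the query body consumes the blob from here
      } else {
        // Second pass, after a miss: the original query returned the full
        // blob, so store it under its hash for future requests.
        uw_Basis_cache_file(ctx, blob_from_sql);
      }
    }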
diff --git a/src/cjr_print.sml b/src/cjr_print.sml
index 43265fb8..c83da031 100644
--- a/src/cjr_print.sml
+++ b/src/cjr_print.sml
@@ -2189,6 +2189,25 @@ and p_exp' par tail env (e, loc) =
string ";"])
inputs,
newline,
+ case Settings.getFileCache () of
+ NONE => box []
+ | SOME _ =>
+ p_list_sepi newline
+ (fn i => fn (_, t) =>
+ case t of
+ Settings.Blob =>
+ box [string "uw_Basis_cache_file(ctx, arg",
+ string (Int.toString (i + 1)),
+ string ");"]
+ | Settings.Nullable Settings.Blob =>
+ box [string "if (arg",
+ string (Int.toString (i + 1)),
+ string ") uw_Basis_cache_file(ctx, arg",
+ string (Int.toString (i + 1)),
+ string ");"]
+ | _ => box [])
+ inputs,
+ newline,
string "uw_ensure_transaction(ctx);",
newline,
newline,
@@ -3677,7 +3696,10 @@ fun p_file env (ds, ps) =
"uw_input_num", "uw_cookie_sig", "uw_check_url", "uw_check_mime", "uw_check_requestHeader", "uw_check_responseHeader", "uw_check_envVar", "uw_check_meta",
case onError of NONE => "NULL" | SOME _ => "uw_onError", "my_periodics",
"\"" ^ Prim.toCString (Settings.getTimeFormat ()) ^ "\"",
- if Settings.getIsHtml5 () then "1" else "0"],
+ if Settings.getIsHtml5 () then "1" else "0",
+ (case Settings.getFileCache () of
+ NONE => "NULL"
+ | SOME s => "\"" ^ Prim.toCString s ^ "\"")],
string "};",
newline]
end
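
The change above also feeds the cache on the write path: before a prepared statement runs, every Blob (or nullable Blob) input is passed to uw_Basis_cache_file, so a blob stored through the application is already on disk when a later query asks for it by hash. For a statement whose first input is a blob and whose third input is a nullable blob (hypothetical positions), the emitted C looks roughly like:

    uw_Basis_cache_file(ctx, arg1);
    if (arg3) uw_Basis_cache_file(ctx, arg3);
    uw_ensure_transaction(ctx);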
diff --git a/src/compiler.sig b/src/compiler.sig
index 0ff84f1c..bcf69fd4 100644
--- a/src/compiler.sig
+++ b/src/compiler.sig
@@ -60,6 +60,7 @@ signature COMPILER = sig
protocol : string option,
dbms : string option,
sigFile : string option,
+ fileCache : string option,
safeGets : string list,
onError : (string * string list * string) option,
minHeap : int,
@@ -125,6 +126,7 @@ signature COMPILER = sig
val pathcheck : (Mono.file, Mono.file) phase
val sidecheck : (Mono.file, Mono.file) phase
val sigcheck : (Mono.file, Mono.file) phase
+ val filecache : (Mono.file, Mono.file) phase
val sqlcache : (Mono.file, Mono.file) phase
val cjrize : (Mono.file, Cjr.file) phase
val prepare : (Cjr.file, Cjr.file) phase
@@ -191,6 +193,7 @@ signature COMPILER = sig
val toPathcheck : (string, Mono.file) transform
val toSidecheck : (string, Mono.file) transform
val toSigcheck : (string, Mono.file) transform
+ val toFilecache : (string, Mono.file) transform
val toSqlcache : (string, Mono.file) transform
val toCjrize : (string, Cjr.file) transform
val toPrepare : (string, Cjr.file) transform
diff --git a/src/compiler.sml b/src/compiler.sml
index 3fb0b767..f724bf56 100644
--- a/src/compiler.sml
+++ b/src/compiler.sml
@@ -64,6 +64,7 @@ type job = {
protocol : string option,
dbms : string option,
sigFile : string option,
+ fileCache : string option,
safeGets : string list,
onError : (string * string list * string) option,
minHeap : int,
@@ -388,6 +389,7 @@ fun institutionalizeJob (job : job) =
Settings.setOnError (#onError job);
Settings.setMinHeap (#minHeap job);
Settings.setSigFile (#sigFile job);
+ Settings.setFileCache (#fileCache job);
Settings.setMimeFilePath (Option.getOpt (#mimeTypes job, "/etc/mime.types")))
datatype commentableLine =
@@ -467,6 +469,7 @@ fun parseUrp' accLibs fname =
protocol = NONE,
dbms = NONE,
sigFile = NONE,
+ fileCache = NONE,
safeGets = [],
onError = NONE,
minHeap = 0,
@@ -601,6 +604,7 @@ fun parseUrp' accLibs fname =
val protocol = ref NONE
val dbms = ref NONE
val sigFile = ref (Settings.getSigFile ())
+ val fileCache = ref (Settings.getFileCache ())
val safeGets = ref []
val onError = ref NONE
val minHeap = ref 0
@@ -640,6 +644,7 @@ fun parseUrp' accLibs fname =
protocol = !protocol,
dbms = !dbms,
sigFile = !sigFile,
+ fileCache = !fileCache,
safeGets = rev (!safeGets),
onError = !onError,
minHeap = !minHeap,
@@ -702,6 +707,7 @@ fun parseUrp' accLibs fname =
protocol = mergeO #2 (#protocol old, #protocol new),
dbms = mergeO #2 (#dbms old, #dbms new),
sigFile = mergeO #2 (#sigFile old, #sigFile new),
+ fileCache = mergeO #2 (#fileCache old, #fileCache new),
safeGets = #safeGets old @ #safeGets new,
onError = mergeO #2 (#onError old, #onError new),
minHeap = Int.max (#minHeap old, #minHeap new),
@@ -790,6 +796,10 @@ fun parseUrp' accLibs fname =
(case !sigFile of
NONE => sigFile := SOME arg
| SOME _ => ())
+ | "filecache" =>
+ (case !fileCache of
+ NONE => fileCache := SOME arg
+ | SOME _ => ())
| "exe" =>
(case !exe of
NONE => exe := SOME (relify arg)
@@ -1513,6 +1523,13 @@ val sigcheck = {
val toSigcheck = transform sigcheck "sigcheck" o toSidecheck
+val filecache = {
+ func = FileCache.instrument,
+ print = MonoPrint.p_file MonoEnv.empty
+}
+
+val toFilecache = transform filecache "filecache" o toSigcheck
+
val sqlcache = {
func = (fn file =>
if Settings.getSqlcache ()
@@ -1521,7 +1538,7 @@ val sqlcache = {
print = MonoPrint.p_file MonoEnv.empty
}
-val toSqlcache = transform sqlcache "sqlcache" o toSigcheck
+val toSqlcache = transform sqlcache "sqlcache" o toFilecache
val cjrize = {
func = Cjrize.cjrize,
diff --git a/src/demo.sml b/src/demo.sml
index a682d28d..1e58e2f8 100644
--- a/src/demo.sml
+++ b/src/demo.sml
@@ -123,6 +123,7 @@ fun make' {prefix, dirname, guided} =
protocol = mergeWith #2 (#protocol combined, #protocol urp),
dbms = mergeWith #2 (#dbms combined, #dbms urp),
sigFile = mergeWith #2 (#sigFile combined, #sigFile urp),
+ fileCache = mergeWith #2 (#fileCache combined, #fileCache urp),
safeGets = #safeGets combined @ #safeGets urp,
onError = NONE,
minHeap = 0,
diff --git a/src/filecache.sig b/src/filecache.sig
new file mode 100644
index 00000000..db57135f
--- /dev/null
+++ b/src/filecache.sig
@@ -0,0 +1,35 @@
+(* Copyright (c) 2013, Adam Chlipala
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - The names of contributors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *)
+
+(* Instrument to check a cache in the file system, to reconstitute blobs without
+ * silly shipping over an SQL connection. *)
+
+signature FILE_CACHE = sig
+
+ val instrument : Mono.file -> Mono.file
+
+end
diff --git a/src/filecache.sml b/src/filecache.sml
new file mode 100644
index 00000000..e2291c10
--- /dev/null
+++ b/src/filecache.sml
@@ -0,0 +1,230 @@
+(* Copyright (c) 2013, Adam Chlipala
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - The names of contributors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *)
+
+structure FileCache :> FILE_CACHE = struct
+
+open Mono
+
+structure SS = BinarySetFn(struct
+ type ord_key = string
+ val compare = String.compare
+ end)
+
+val hasBlob =
+ MonoUtil.Typ.exists (fn TFfi ("Basis", "blob") => true
+ | _ => false)
+
+val unBlob =
+ MonoUtil.Typ.map (fn TFfi ("Basis", "blob") => TFfi ("Basis", "string")
+ | t => t)
+
+fun nodups (exps : (string * typ) list, tables : (string * (string * typ) list) list) =
+ let
+ val cols = map #1 exps @ ListUtil.mapConcat (map #1 o #2) tables
+
+ val (_, good) =
+ foldl (fn (name, (names, good)) =>
+ if SS.member(names, name) then
+ (names, false)
+ else
+ (SS.add (names, name), good)) (SS.empty, true) cols
+ in
+ good
+ end
+
+fun instrument file =
+ let
+ fun exp e =
+ case e of
+ EQuery {exps, tables, state, query, body, initial} =>
+ if (List.exists (hasBlob o #2) exps
+ orelse List.exists (List.exists (hasBlob o #2) o #2) tables)
+ andalso nodups (exps, tables) then
+ let
+ val exps = ListMergeSort.sort
+ (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER)
+ exps
+ val tables = ListMergeSort.sort
+ (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER)
+ tables
+ val tables = map (fn (x, xts) =>
+ (x, ListMergeSort.sort
+ (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER)
+ xts)) tables
+
+ val loc = #2 query
+
+ fun wrapCol (name, t) =
+ case #1 t of
+ TFfi ("Basis", "blob") =>
+ "DIGEST(" ^ name ^ ", 'sha512')"
+ | TOption t' => wrapCol (name, t')
+ | _ => name
+
+ val mangle = Settings.mangleSql
+
+ val cols = map (fn (name, t) => (mangle name, t)) exps
+ @ ListUtil.mapConcat (fn (_, cols) =>
+ map (fn (name, t) =>
+ (mangle name,
+ t)) cols) tables
+
+ val prequery =
+ "SELECT "
+ ^ String.concatWith ", " (map wrapCol cols)
+ ^ " FROM ("
+
+ val postquery =
+ ") AS Wrap"
+
+ val wrapped_query =
+ (EStrcat ((EPrim (Prim.String (Prim.Normal, prequery)), loc),
+ (EStrcat (query,
+ (EPrim (Prim.String (Prim.Normal, postquery)), loc)), loc)), loc)
+ val wrapped_query = MonoOpt.optExp wrapped_query
+
+ val exps' = map (fn (name, t) => (name, unBlob t)) exps
+ val tables' = map (fn (name, cols) =>
+ (name,
+ map (fn (cname, t) => (cname, unBlob t)) cols)) tables
+
+ val blob = (TFfi ("Basis", "blob"), loc)
+ val string = (TFfi ("Basis", "string"), loc)
+
+ fun trycache (name, e, t : typ) =
+ (name,
+ case #1 t of
+ TFfi ("Basis", "blob") =>
+ (EFfiApp ("Basis",
+ "check_filecache",
+ [(e, string)]), loc)
+ | TOption (TFfi ("Basis", "blob"), _) =>
+ (ECase (e,
+ [((PNone string, loc),
+ (ENone blob, loc)),
+ ((PSome (string, (PVar ("hash", string), loc)), loc),
+ (ESome (blob,
+ (EFfiApp ("Basis",
+ "check_filecache",
+ [((ERel 0, loc), string)]), loc)), loc))],
+ {disc = (TOption string, loc),
+ result = (TOption blob, loc)}), loc)
+ | _ => e,
+ t)
+
+ val wrapped_body_trycache =
+ (ELet ("uncached",
+ (TRecord (exps @ map (fn (name, cols) =>
+ (name, (TRecord cols, loc))) tables),
+ loc),
+ (ERecord (map (fn (name, t) =>
+ trycache (name,
+ (EField ((ERel 1, loc),
+ name), loc),
+ t)) exps
+ @ map (fn (tname, cols) =>
+ (tname,
+ (ERecord (map (fn (name, t) =>
+ trycache (name,
+ (EField ((EField ((ERel 1, loc), tname), loc), name), loc),
+ t)) cols), loc),
+ (TRecord cols, loc))) tables), loc),
+ MonoEnv.subExpInExp (2, (ERel 0, loc))
+
+
+ (MonoEnv.liftExpInExp 0 body)), loc)
+
+ fun maybeadd (e, t, acc) =
+ case #1 t of
+ TFfi ("Basis", "blob") =>
+ (ESeq ((EFfiApp ("Basis",
+ "cache_file",
+ [(e, blob)]), loc),
+ acc), loc)
+ | TOption (TFfi ("Basis", "blob"), _) =>
+ (ESeq ((ECase (e,
+ [((PNone blob, loc),
+ (ERecord [], loc)),
+ ((PSome (blob, (PVar ("blob", blob), loc)), loc),
+ (EFfiApp ("Basis",
+ "cache_file",
+ [((ERel 0, loc), blob)]), loc))],
+ {disc = t,
+ result = (TRecord [], loc)}), loc),
+ acc), loc)
+ | _ => acc
+
+ val wrapped_body_addtocache =
+ foldl (fn ((name, t), e) =>
+ maybeadd ((EField ((ERel 1, loc), name), loc),
+ t, e))
+ (foldl (fn ((tname, cols), e) =>
+ foldl (fn ((name, t), e) =>
+ maybeadd ((EField ((EField ((ERel 1, loc), tname), loc), name), loc),
+ t, e)) e cols) body tables)
+ exps
+ in
+ ECase ((EFfiApp ("Basis", "filecache_missed", []), loc),
+ [((PCon (Enum,
+ PConFfi {mod = "Basis",
+ datatyp = "bool",
+ con = "False",
+ arg = NONE},
+ NONE), loc),
+ (EQuery {exps = exps',
+ tables = tables',
+ state = state,
+ query = wrapped_query,
+ body = wrapped_body_trycache,
+ initial = initial}, loc)),
+ ((PCon (Enum,
+ PConFfi {mod = "Basis",
+ datatyp = "bool",
+ con = "True",
+ arg = NONE},
+ NONE), loc),
+ (EQuery {exps = exps,
+ tables = tables,
+ state = state,
+ query = query,
+ body = wrapped_body_addtocache,
+ initial = initial}, loc))],
+ {disc = (TFfi ("Basis", "bool"), loc),
+ result = state})
+ end
+ else
+ e
+ | _ => e
+ in
+ case Settings.getFileCache () of
+ NONE => file
+ | SOME _ => MonoUtil.File.map {typ = fn t => t,
+ exp = exp,
+ decl = fn d => d} file
+ end
+
+end
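
FileCache.instrument rewrites every query whose result row mentions a blob, provided the column names are duplicate-free (the nodups check). It compiles the query into a case on Basis.filecache_missed: on the first pass it runs a wrapped query in which each blob column is replaced by its SHA-512 digest and each returned hash goes through Basis.check_filecache; after a cache miss forces a retry, it runs the original query and pushes every returned blob through Basis.cache_file. With hypothetical columns uw_title (text) and uw_data (blob), the wrapped first-pass SQL assembled from prequery and postquery is roughly:

    SELECT uw_title, DIGEST(uw_data, 'sha512')
    FROM (
      -- original query text, unchanged
      ...
    ) AS Wrap

DIGEST(..., 'sha512') is presumably Postgres's pgcrypto digest function, matching the supportsSHA512 capability flag added to the drivers below. The unBlob rewrite relies on MonoUtil.Typ.map, which is where the commit message's "longstanding MonoUtil bug" comes in: as the mono_util.sml hunk that follows shows, mapfold used to rebuild TOption, TList, and TSignal with the original inner type rather than the mapped one, silently dropping rewrites under those constructors.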
diff --git a/src/mono_util.sml b/src/mono_util.sml
index fc1a2bcb..fdf48d20 100644
--- a/src/mono_util.sml
+++ b/src/mono_util.sml
@@ -107,16 +107,16 @@ fun mapfold fc =
| TOption t =>
S.map2 (mft t,
fn t' =>
- (TOption t, loc))
+ (TOption t', loc))
| TList t =>
S.map2 (mft t,
fn t' =>
- (TList t, loc))
+ (TList t', loc))
| TSource => S.return2 cAll
| TSignal t =>
S.map2 (mft t,
fn t' =>
- (TSignal t, loc))
+ (TSignal t', loc))
in
mft
end
diff --git a/src/mysql.sml b/src/mysql.sml
index 52e4921e..e7cad84e 100644
--- a/src/mysql.sml
+++ b/src/mysql.sml
@@ -1609,6 +1609,7 @@ val () = addDbms {name = "mysql",
onlyUnion = true,
nestedRelops = false,
windowFunctions = false,
- supportsIsDistinctFrom = true}
+ supportsIsDistinctFrom = true,
+ supportsSHA512 = false}
end
diff --git a/src/postgres.sml b/src/postgres.sml
index fac913f0..2b6bee8c 100644
--- a/src/postgres.sml
+++ b/src/postgres.sml
@@ -1153,7 +1153,8 @@ val () = addDbms {name = "postgres",
onlyUnion = false,
nestedRelops = true,
windowFunctions = true,
- supportsIsDistinctFrom = true}
+ supportsIsDistinctFrom = true,
+ supportsSHA512 = true}
val () = setDbms "postgres"
diff --git a/src/settings.sig b/src/settings.sig
index 729218ac..986d6ed7 100644
--- a/src/settings.sig
+++ b/src/settings.sig
@@ -219,7 +219,8 @@ signature SETTINGS = sig
onlyUnion : bool,
nestedRelops : bool,
windowFunctions : bool,
- supportsIsDistinctFrom : bool
+ supportsIsDistinctFrom : bool,
+ supportsSHA512 : bool
}
val addDbms : dbms -> unit
@@ -253,6 +254,9 @@ signature SETTINGS = sig
val setSigFile : string option -> unit
val getSigFile : unit -> string option
+ val setFileCache : string option -> unit
+ val getFileCache : unit -> string option
+
(* Which GET-able functions should be allowed to have side effects? *)
val setSafeGets : string list -> unit
val isSafeGet : string -> bool
diff --git a/src/settings.sml b/src/settings.sml
index 9e6d3e76..47a88932 100644
--- a/src/settings.sml
+++ b/src/settings.sml
@@ -646,7 +646,8 @@ type dbms = {
onlyUnion : bool,
nestedRelops : bool,
windowFunctions: bool,
- supportsIsDistinctFrom : bool
+ supportsIsDistinctFrom : bool,
+ supportsSHA512 : bool
}
val dbmses = ref ([] : dbms list)
@@ -679,7 +680,8 @@ val curDb = ref ({name = "",
onlyUnion = false,
nestedRelops = false,
windowFunctions = false,
- supportsIsDistinctFrom = false} : dbms)
+ supportsIsDistinctFrom = false,
+ supportsSHA512 = false} : dbms)
fun addDbms v = dbmses := v :: !dbmses
fun setDbms s =
@@ -724,6 +726,10 @@ val sigFile = ref (NONE : string option)
fun setSigFile v = sigFile := v
fun getSigFile () = !sigFile
+val fileCache = ref (NONE : string option)
+fun setFileCache v = fileCache := v
+fun getFileCache () = !fileCache
+
structure SS = BinarySetFn(struct
type ord_key = string
val compare = String.compare
diff --git a/src/sources b/src/sources
index 52b1bdd7..5c0b2a84 100644
--- a/src/sources
+++ b/src/sources
@@ -231,6 +231,9 @@ $(SRC)/sidecheck.sml
$(SRC)/sigcheck.sig
$(SRC)/sigcheck.sml
+$(SRC)/filecache.sig
+$(SRC)/filecache.sml
+
$(SRC)/mono_inline.sml
$(SRC)/sha1.sig
diff --git a/src/sqlite.sml b/src/sqlite.sml
index 0acd866b..db7052d1 100644
--- a/src/sqlite.sml
+++ b/src/sqlite.sml
@@ -855,6 +855,7 @@ val () = addDbms {name = "sqlite",
onlyUnion = false,
nestedRelops = false,
windowFunctions = false,
- supportsIsDistinctFrom = false}
+ supportsIsDistinctFrom = false,
+ supportsSHA512 = false}
end