-rw-r--r--  doc/manual.tex             |   1
-rw-r--r--  include/urweb/types_cpp.h  |   1
-rw-r--r--  include/urweb/urweb_cpp.h  |   6
-rw-r--r--  lib/ur/basis.urs           |   5
-rw-r--r--  src/c/urweb.c              | 131
-rw-r--r--  src/cjr_print.sml          |  24
-rw-r--r--  src/compiler.sig           |   3
-rw-r--r--  src/compiler.sml           |  19
-rw-r--r--  src/demo.sml               |   1
-rw-r--r--  src/filecache.sig          |  35
-rw-r--r--  src/filecache.sml          | 230
-rw-r--r--  src/mono_util.sml          |   6
-rw-r--r--  src/mysql.sml              |   3
-rw-r--r--  src/postgres.sml           |   3
-rw-r--r--  src/settings.sig           |   6
-rw-r--r--  src/settings.sml           |  15
-rw-r--r--  src/sources                |   3
-rw-r--r--  src/sqlite.sml             |   3
-rw-r--r--  tests/dbupload.urp         |   1
-rw-r--r--  tests/dbuploadOpt.ur       |  27
-rw-r--r--  tests/dbuploadOpt.urp      |   7
21 files changed, 506 insertions(+), 24 deletions(-)
diff --git a/doc/manual.tex b/doc/manual.tex
index 985dab5b..857539db 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -151,6 +151,7 @@ Here is the complete list of directive forms. ``FFI'' stands for ``foreign func
\item \texttt{exe FILENAME} sets the filename to which to write the output executable. The default for file \texttt{P.urp} is \texttt{P.exe}.
\item \texttt{file URI FILENAME} asks for the application executable to respond to requests for \texttt{URI} by serving a snapshot of the contents of \texttt{FILENAME} as of compile time. That is, the file contents are baked into the executable. System file \texttt{/etc/mime.types} is consulted (again, at compile time) to figure out the right MIME type to suggest in the HTTP response.
\item \texttt{file URI FILENAME MIME-TYPE} works like the simpler form of \texttt{file}, but the proper MIME type for the file is given directly.
+\item \texttt{filecache PATH} sets a path to a directory to use for caching files stored in the SQL database. It can be expensive to schlep files back and forth between the database and an Ur/Web application, since database engines don't tend to be optimized for transferring large files. Ur/Web will still store the files in the database, as the ``version of record'' for your whole, consistent data set, but the application will try to query the database only for cryptographic hashes of files, using each hash to fetch the actual contents from the cache. (This feature is currently only available for PostgreSQL, with the \texttt{pgcrypto} module installed to provide SHA512 hashing. It would defeat the purpose to run the hashing operation in the application rather than the database engine!)
\item \texttt{ffi FILENAME} reads the file \texttt{FILENAME.urs} to determine the interface to a new FFI module. The name of the module is calculated from \texttt{FILENAME} in the same way as for normal source files. See the files \texttt{include/urweb/urweb\_cpp.h} and \texttt{src/c/urweb.c} for examples of C headers and implementations for FFI modules. In general, every type or value \texttt{Module.ident} becomes \texttt{uw\_Module\_ident} in C.
\item \texttt{html5} asks to generate HTML5 code, which primarily affects the first few lines of the output documents, like the \texttt{DOCTYPE}. This option is on by default.
\item \texttt{include FILENAME} adds \texttt{FILENAME} to the list of files to be \texttt{\#include}d in C sources. This is most useful for interfacing with new FFI modules.
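For concreteness, a minimal project file exercising the new directive might look like the sketch below; the project name and cache path are illustrative and not part of this patch. The database must be PostgreSQL with the pgcrypto extension installed (e.g. CREATE EXTENSION pgcrypto;), so that DIGEST(..., 'sha512') is available, and the cache directory must already exist and be writable by the application.

    database dbname=myapp
    sql myapp.sql
    allow mime *
    filecache /var/cache/myapp-files

    myapp

The same directive appears in tests/dbupload.urp and tests/dbuploadOpt.urp further down, using /tmp/files as the cache directory.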
diff --git a/include/urweb/types_cpp.h b/include/urweb/types_cpp.h
index 2fa473ac..0c546d1c 100644
--- a/include/urweb/types_cpp.h
+++ b/include/urweb/types_cpp.h
@@ -105,6 +105,7 @@ typedef struct {
uw_Basis_string time_format;
int is_html5;
+ char *file_cache;
} uw_app;
typedef struct {
diff --git a/include/urweb/urweb_cpp.h b/include/urweb/urweb_cpp.h
index 0d5f5e0e..5f1144b8 100644
--- a/include/urweb/urweb_cpp.h
+++ b/include/urweb/urweb_cpp.h
@@ -358,8 +358,6 @@ uw_Basis_string uw_Basis_timef(struct uw_context *, const char *fmt, uw_Basis_ti
uw_Basis_time uw_Basis_stringToTimef(struct uw_context *, const char *fmt, uw_Basis_string);
uw_Basis_time uw_Basis_stringToTimef_error(struct uw_context *, const char *fmt, uw_Basis_string);
-uw_Basis_string uw_Basis_crypt(struct uw_context *, uw_Basis_string key, uw_Basis_string salt);
-
uw_Basis_bool uw_Basis_eq_time(struct uw_context *, uw_Basis_time, uw_Basis_time);
uw_Basis_bool uw_Basis_lt_time(struct uw_context *, uw_Basis_time, uw_Basis_time);
uw_Basis_bool uw_Basis_le_time(struct uw_context *, uw_Basis_time, uw_Basis_time);
@@ -432,4 +430,8 @@ void *uw_Sqlcache_flush(struct uw_context *, uw_Sqlcache_Cache *, char **);
int strcmp_nullsafe(const char *, const char *);
+uw_unit uw_Basis_cache_file(struct uw_context *, uw_Basis_blob contents);
+uw_Basis_blob uw_Basis_check_filecache(struct uw_context *, uw_Basis_string hash);
+uw_Basis_bool uw_Basis_filecache_missed(struct uw_context *);
+
#endif
diff --git a/lib/ur/basis.urs b/lib/ur/basis.urs
index 6d71d00a..66cc0e50 100644
--- a/lib/ur/basis.urs
+++ b/lib/ur/basis.urs
@@ -192,11 +192,6 @@ val datetimeSecond : time -> int
val datetimeDayOfWeek : time -> int
-(** * Encryption *)
-
-val crypt : string -> string -> string
-
-
(** HTTP operations *)
con http_cookie :: Type -> Type
diff --git a/src/c/urweb.c b/src/c/urweb.c
index 504597ef..e7efae38 100644
--- a/src/c/urweb.c
+++ b/src/c/urweb.c
@@ -13,8 +13,8 @@
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
-#include <openssl/des.h>
#include <openssl/rand.h>
+#include <openssl/sha.h>
#include <time.h>
#include <math.h>
@@ -514,6 +514,11 @@ struct uw_context {
uw_Sqlcache_Unlock *cacheUnlock;
int remoteSock;
+
+ int file_cache_missed;
+ // Set if we are recovering from a miss in the file cache in handling an SQL
+ // query that only returns hashes of files. If so, this time around we will
+ // run queries to return actual file contents instead.
};
size_t uw_headers_max = SIZE_MAX;
@@ -608,6 +613,8 @@ uw_context uw_init(int id, uw_loggers *lg) {
ctx->cacheUnlock = NULL;
+ ctx->file_cache_missed = 0;
+
return ctx;
}
@@ -3643,6 +3650,8 @@ int uw_commit(uw_context ctx) {
}
}
+ ctx->file_cache_missed = 0;
+
return 0;
}
@@ -4490,11 +4499,6 @@ failure_kind uw_runCallback(uw_context ctx, void (*callback)(uw_context)) {
return r;
}
-uw_Basis_string uw_Basis_crypt(uw_context ctx, uw_Basis_string key, uw_Basis_string salt) {
- char buf[14];
- return uw_strdup(ctx, DES_fcrypt(key, salt, buf));
-}
-
uw_Basis_bool uw_Basis_eq_time(uw_context ctx, uw_Basis_time t1, uw_Basis_time t2) {
(void)ctx;
return !!(t1.seconds == t2.seconds && t1.microseconds == t2.microseconds);
@@ -5063,3 +5067,118 @@ int strcmp_nullsafe(const char *str1, const char *str2) {
else
return 1;
}
+
+static int is_valid_hash(uw_Basis_string hash) {
+ for (; *hash; ++hash)
+ if (!isxdigit(*hash))
+ return 0;
+
+ return 1;
+}
+
+uw_unit uw_Basis_cache_file(uw_context ctx, uw_Basis_blob contents) {
+ char *dir = ctx->app->file_cache, path[1024], tempfile[1024];
+ unsigned char *res, *hash;
+ char *hash_encoded;
+ int fd, len, i;
+ ssize_t written_so_far = 0;
+
+ if (!dir)
+ return uw_unit_v;
+
+ hash = uw_malloc(ctx, SHA512_DIGEST_LENGTH);
+ res = SHA512((unsigned char *)contents.data, contents.size, hash);
+ if (!res)
+ uw_error(ctx, FATAL, "Can't hash file contents");
+
+ hash_encoded = uw_malloc(ctx, SHA512_DIGEST_LENGTH * 2 + 1);
+ for (i = 0; i < SHA512_DIGEST_LENGTH; ++i)
+ sprintf(hash_encoded + 2 * i, "%02x", (int)hash[i]);
+ hash_encoded[SHA512_DIGEST_LENGTH * 2] = 0;
+
+ len = snprintf(tempfile, sizeof tempfile, "%s/tmpXXXXXX", dir);
+ if (len < 0 || len >= sizeof tempfile)
+ uw_error(ctx, FATAL, "Error assembling file path for cache (temporary)");
+
+ fd = mkstemp(tempfile);
+ if (fd < 0)
+ uw_error(ctx, FATAL, "Error creating temporary file for cache");
+
+ while (written_so_far < contents.size) {
+ ssize_t written_just_now = write(fd, contents.data + written_so_far, contents.size - written_so_far);
+ if (written_just_now <= 0) {
+ close(fd);
+ uw_error(ctx, FATAL, "Error writing all bytes to cached file");
+ }
+ written_so_far += written_just_now;
+ }
+
+ close(fd);
+
+ len = snprintf(path, sizeof path, "%s/%s", dir, hash_encoded);
+ if (len < 0 || len >= sizeof path)
+ uw_error(ctx, FATAL, "Error assembling file path for cache");
+
+ if (rename(tempfile, path))
+ uw_error(ctx, FATAL, "Error renaming temporary file into cache");
+
+ return uw_unit_v;
+}
+
+uw_Basis_blob uw_Basis_check_filecache(uw_context ctx, uw_Basis_string hash) {
+ char path[1024], *dir = ctx->app->file_cache, *filedata;
+ int len;
+ long size, read_so_far = 0;
+ FILE *fp;
+ uw_Basis_blob res;
+
+ // Hashes arrive in Postgres's hex output format for bytea, which prefixes the
+ // hex digits with two extra characters ("\x"). Skip past them.
+ if (!hash[0] || !hash[1])
+ uw_error(ctx, FATAL, "Hash to check against file cache came in not in Postgres format: %s", hash);
+ hash += 2;
+
+ if (!dir)
+ uw_error(ctx, FATAL, "Checking file cache when no directory is set");
+
+ if (!is_valid_hash(hash))
+ uw_error(ctx, FATAL, "Checking file cache with invalid hash %s", hash);
+
+ len = snprintf(path, sizeof path, "%s/%s", dir, hash);
+ if (len < 0 || len >= sizeof path)
+ uw_error(ctx, FATAL, "Error assembling file path for cache");
+
+ fp = fopen(path, "r");
+ if (!fp) {
+ ctx->file_cache_missed = 1;
+ uw_error(ctx, UNLIMITED_RETRY, "Missed in the file cache for hash %s", hash);
+ }
+ uw_push_cleanup(ctx, (void (*)(void *))fclose, fp);
+
+ if (fseek(fp, 0L, SEEK_END))
+ uw_error(ctx, FATAL, "Error seeking to end of cached file");
+
+ size = ftell(fp);
+ if (size < 0)
+ uw_error(ctx, FATAL, "Error getting size of cached file");
+
+ rewind(fp);
+ filedata = uw_malloc(ctx, size);
+
+ while (read_so_far < size) {
+ size_t just_read = fread(filedata + read_so_far, 1, size - read_so_far, fp);
+ if (just_read <= 0)
+ uw_error(ctx, FATAL, "Error reading all bytes of cached file");
+ read_so_far += just_read;
+ }
+
+ uw_pop_cleanup(ctx);
+
+ res.size = size;
+ res.data = filedata;
+ return res;
+}
+
+uw_Basis_bool uw_Basis_filecache_missed(uw_context ctx) {
+ return !!(ctx->file_cache_missed);
+}
diff --git a/src/cjr_print.sml b/src/cjr_print.sml
index 43265fb8..c83da031 100644
--- a/src/cjr_print.sml
+++ b/src/cjr_print.sml
@@ -2189,6 +2189,25 @@ and p_exp' par tail env (e, loc) =
string ";"])
inputs,
newline,
+ case Settings.getFileCache () of
+ NONE => box []
+ | SOME _ =>
+ p_list_sepi newline
+ (fn i => fn (_, t) =>
+ case t of
+ Settings.Blob =>
+ box [string "uw_Basis_cache_file(ctx, arg",
+ string (Int.toString (i + 1)),
+ string ");"]
+ | Settings.Nullable Settings.Blob =>
+ box [string "if (arg",
+ string (Int.toString (i + 1)),
+ string ") uw_Basis_cache_file(ctx, arg",
+ string (Int.toString (i + 1)),
+ string ");"]
+ | _ => box [])
+ inputs,
+ newline,
string "uw_ensure_transaction(ctx);",
newline,
newline,
@@ -3677,7 +3696,10 @@ fun p_file env (ds, ps) =
"uw_input_num", "uw_cookie_sig", "uw_check_url", "uw_check_mime", "uw_check_requestHeader", "uw_check_responseHeader", "uw_check_envVar", "uw_check_meta",
case onError of NONE => "NULL" | SOME _ => "uw_onError", "my_periodics",
"\"" ^ Prim.toCString (Settings.getTimeFormat ()) ^ "\"",
- if Settings.getIsHtml5 () then "1" else "0"],
+ if Settings.getIsHtml5 () then "1" else "0",
+ (case Settings.getFileCache () of
+ NONE => "NULL"
+ | SOME s => "\"" ^ Prim.toCString s ^ "\"")],
string "};",
newline]
end
diff --git a/src/compiler.sig b/src/compiler.sig
index 0ff84f1c..bcf69fd4 100644
--- a/src/compiler.sig
+++ b/src/compiler.sig
@@ -60,6 +60,7 @@ signature COMPILER = sig
protocol : string option,
dbms : string option,
sigFile : string option,
+ fileCache : string option,
safeGets : string list,
onError : (string * string list * string) option,
minHeap : int,
@@ -125,6 +126,7 @@ signature COMPILER = sig
val pathcheck : (Mono.file, Mono.file) phase
val sidecheck : (Mono.file, Mono.file) phase
val sigcheck : (Mono.file, Mono.file) phase
+ val filecache : (Mono.file, Mono.file) phase
val sqlcache : (Mono.file, Mono.file) phase
val cjrize : (Mono.file, Cjr.file) phase
val prepare : (Cjr.file, Cjr.file) phase
@@ -191,6 +193,7 @@ signature COMPILER = sig
val toPathcheck : (string, Mono.file) transform
val toSidecheck : (string, Mono.file) transform
val toSigcheck : (string, Mono.file) transform
+ val toFilecache : (string, Mono.file) transform
val toSqlcache : (string, Mono.file) transform
val toCjrize : (string, Cjr.file) transform
val toPrepare : (string, Cjr.file) transform
diff --git a/src/compiler.sml b/src/compiler.sml
index 3fb0b767..f724bf56 100644
--- a/src/compiler.sml
+++ b/src/compiler.sml
@@ -64,6 +64,7 @@ type job = {
protocol : string option,
dbms : string option,
sigFile : string option,
+ fileCache : string option,
safeGets : string list,
onError : (string * string list * string) option,
minHeap : int,
@@ -388,6 +389,7 @@ fun institutionalizeJob (job : job) =
Settings.setOnError (#onError job);
Settings.setMinHeap (#minHeap job);
Settings.setSigFile (#sigFile job);
+ Settings.setFileCache (#fileCache job);
Settings.setMimeFilePath (Option.getOpt (#mimeTypes job, "/etc/mime.types")))
datatype commentableLine =
@@ -467,6 +469,7 @@ fun parseUrp' accLibs fname =
protocol = NONE,
dbms = NONE,
sigFile = NONE,
+ fileCache = NONE,
safeGets = [],
onError = NONE,
minHeap = 0,
@@ -601,6 +604,7 @@ fun parseUrp' accLibs fname =
val protocol = ref NONE
val dbms = ref NONE
val sigFile = ref (Settings.getSigFile ())
+ val fileCache = ref (Settings.getFileCache ())
val safeGets = ref []
val onError = ref NONE
val minHeap = ref 0
@@ -640,6 +644,7 @@ fun parseUrp' accLibs fname =
protocol = !protocol,
dbms = !dbms,
sigFile = !sigFile,
+ fileCache = !fileCache,
safeGets = rev (!safeGets),
onError = !onError,
minHeap = !minHeap,
@@ -702,6 +707,7 @@ fun parseUrp' accLibs fname =
protocol = mergeO #2 (#protocol old, #protocol new),
dbms = mergeO #2 (#dbms old, #dbms new),
sigFile = mergeO #2 (#sigFile old, #sigFile new),
+ fileCache = mergeO #2 (#fileCache old, #fileCache new),
safeGets = #safeGets old @ #safeGets new,
onError = mergeO #2 (#onError old, #onError new),
minHeap = Int.max (#minHeap old, #minHeap new),
@@ -790,6 +796,10 @@ fun parseUrp' accLibs fname =
(case !sigFile of
NONE => sigFile := SOME arg
| SOME _ => ())
+ | "filecache" =>
+ (case !fileCache of
+ NONE => fileCache := SOME arg
+ | SOME _ => ())
| "exe" =>
(case !exe of
NONE => exe := SOME (relify arg)
@@ -1513,6 +1523,13 @@ val sigcheck = {
val toSigcheck = transform sigcheck "sigcheck" o toSidecheck
+val filecache = {
+ func = FileCache.instrument,
+ print = MonoPrint.p_file MonoEnv.empty
+}
+
+val toFilecache = transform filecache "filecache" o toSigcheck
+
val sqlcache = {
func = (fn file =>
if Settings.getSqlcache ()
@@ -1521,7 +1538,7 @@ val sqlcache = {
print = MonoPrint.p_file MonoEnv.empty
}
-val toSqlcache = transform sqlcache "sqlcache" o toSigcheck
+val toSqlcache = transform sqlcache "sqlcache" o toFilecache
val cjrize = {
func = Cjrize.cjrize,
diff --git a/src/demo.sml b/src/demo.sml
index a682d28d..1e58e2f8 100644
--- a/src/demo.sml
+++ b/src/demo.sml
@@ -123,6 +123,7 @@ fun make' {prefix, dirname, guided} =
protocol = mergeWith #2 (#protocol combined, #protocol urp),
dbms = mergeWith #2 (#dbms combined, #dbms urp),
sigFile = mergeWith #2 (#sigFile combined, #sigFile urp),
+ fileCache = mergeWith #2 (#fileCache combined, #fileCache urp),
safeGets = #safeGets combined @ #safeGets urp,
onError = NONE,
minHeap = 0,
diff --git a/src/filecache.sig b/src/filecache.sig
new file mode 100644
index 00000000..db57135f
--- /dev/null
+++ b/src/filecache.sig
@@ -0,0 +1,35 @@
+(* Copyright (c) 2013, Adam Chlipala
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - The names of contributors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *)
+
+(* Instrument to check a cache in the file system, to reconstitute blobs without
+ * silly shipping over an SQL connection. *)
+
+signature FILE_CACHE = sig
+
+ val instrument : Mono.file -> Mono.file
+
+end
diff --git a/src/filecache.sml b/src/filecache.sml
new file mode 100644
index 00000000..e2291c10
--- /dev/null
+++ b/src/filecache.sml
@@ -0,0 +1,230 @@
+(* Copyright (c) 2013, Adam Chlipala
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - The names of contributors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *)
+
+structure FileCache :> FILE_CACHE = struct
+
+open Mono
+
+structure SS = BinarySetFn(struct
+ type ord_key = string
+ val compare = String.compare
+ end)
+
+val hasBlob =
+ MonoUtil.Typ.exists (fn TFfi ("Basis", "blob") => true
+ | _ => false)
+
+val unBlob =
+ MonoUtil.Typ.map (fn TFfi ("Basis", "blob") => TFfi ("Basis", "string")
+ | t => t)
+
+fun nodups (exps : (string * typ) list, tables : (string * (string * typ) list) list) =
+ let
+ val cols = map #1 exps @ ListUtil.mapConcat (map #1 o #2) tables
+
+ val (_, good) =
+ foldl (fn (name, (names, good)) =>
+ if SS.member(names, name) then
+ (names, false)
+ else
+ (SS.add (names, name), good)) (SS.empty, true) cols
+ in
+ good
+ end
+
+fun instrument file =
+ let
+ fun exp e =
+ case e of
+ EQuery {exps, tables, state, query, body, initial} =>
+ if (List.exists (hasBlob o #2) exps
+ orelse List.exists (List.exists (hasBlob o #2) o #2) tables)
+ andalso nodups (exps, tables) then
+ let
+ val exps = ListMergeSort.sort
+ (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER)
+ exps
+ val tables = ListMergeSort.sort
+ (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER)
+ tables
+ val tables = map (fn (x, xts) =>
+ (x, ListMergeSort.sort
+ (fn ((x, _), (y, _)) => String.compare (x, y) = GREATER)
+ xts)) tables
+
+ val loc = #2 query
+
+ fun wrapCol (name, t) =
+ case #1 t of
+ TFfi ("Basis", "blob") =>
+ "DIGEST(" ^ name ^ ", 'sha512')"
+ | TOption t' => wrapCol (name, t')
+ | _ => name
+
+ val mangle = Settings.mangleSql
+
+ val cols = map (fn (name, t) => (mangle name, t)) exps
+ @ ListUtil.mapConcat (fn (_, cols) =>
+ map (fn (name, t) =>
+ (mangle name,
+ t)) cols) tables
+
+ val prequery =
+ "SELECT "
+ ^ String.concatWith ", " (map wrapCol cols)
+ ^ " FROM ("
+
+ val postquery =
+ ") AS Wrap"
+
+ val wrapped_query =
+ (EStrcat ((EPrim (Prim.String (Prim.Normal, prequery)), loc),
+ (EStrcat (query,
+ (EPrim (Prim.String (Prim.Normal, postquery)), loc)), loc)), loc)
+ val wrapped_query = MonoOpt.optExp wrapped_query
+
+ val exps' = map (fn (name, t) => (name, unBlob t)) exps
+ val tables' = map (fn (name, cols) =>
+ (name,
+ map (fn (cname, t) => (cname, unBlob t)) cols)) tables
+
+ val blob = (TFfi ("Basis", "blob"), loc)
+ val string = (TFfi ("Basis", "string"), loc)
+
+ fun trycache (name, e, t : typ) =
+ (name,
+ case #1 t of
+ TFfi ("Basis", "blob") =>
+ (EFfiApp ("Basis",
+ "check_filecache",
+ [(e, string)]), loc)
+ | TOption (TFfi ("Basis", "blob"), _) =>
+ (ECase (e,
+ [((PNone string, loc),
+ (ENone blob, loc)),
+ ((PSome (string, (PVar ("hash", string), loc)), loc),
+ (ESome (blob,
+ (EFfiApp ("Basis",
+ "check_filecache",
+ [((ERel 0, loc), string)]), loc)), loc))],
+ {disc = (TOption string, loc),
+ result = (TOption blob, loc)}), loc)
+ | _ => e,
+ t)
+
+ val wrapped_body_trycache =
+ (ELet ("uncached",
+ (TRecord (exps @ map (fn (name, cols) =>
+ (name, (TRecord cols, loc))) tables),
+ loc),
+ (ERecord (map (fn (name, t) =>
+ trycache (name,
+ (EField ((ERel 1, loc),
+ name), loc),
+ t)) exps
+ @ map (fn (tname, cols) =>
+ (tname,
+ (ERecord (map (fn (name, t) =>
+ trycache (name,
+ (EField ((EField ((ERel 1, loc), tname), loc), name), loc),
+ t)) cols), loc),
+ (TRecord cols, loc))) tables), loc),
+ MonoEnv.subExpInExp (2, (ERel 0, loc))
+
+
+ (MonoEnv.liftExpInExp 0 body)), loc)
+
+ fun maybeadd (e, t, acc) =
+ case #1 t of
+ TFfi ("Basis", "blob") =>
+ (ESeq ((EFfiApp ("Basis",
+ "cache_file",
+ [(e, blob)]), loc),
+ acc), loc)
+ | TOption (TFfi ("Basis", "blob"), _) =>
+ (ESeq ((ECase (e,
+ [((PNone blob, loc),
+ (ERecord [], loc)),
+ ((PSome (blob, (PVar ("blob", blob), loc)), loc),
+ (EFfiApp ("Basis",
+ "cache_file",
+ [((ERel 0, loc), blob)]), loc))],
+ {disc = t,
+ result = (TRecord [], loc)}), loc),
+ acc), loc)
+ | _ => acc
+
+ val wrapped_body_addtocache =
+ foldl (fn ((name, t), e) =>
+ maybeadd ((EField ((ERel 1, loc), name), loc),
+ t, e))
+ (foldl (fn ((tname, cols), e) =>
+ foldl (fn ((name, t), e) =>
+ maybeadd ((EField ((EField ((ERel 1, loc), tname), loc), name), loc),
+ t, e)) e cols) body tables)
+ exps
+ in
+ ECase ((EFfiApp ("Basis", "filecache_missed", []), loc),
+ [((PCon (Enum,
+ PConFfi {mod = "Basis",
+ datatyp = "bool",
+ con = "False",
+ arg = NONE},
+ NONE), loc),
+ (EQuery {exps = exps',
+ tables = tables',
+ state = state,
+ query = wrapped_query,
+ body = wrapped_body_trycache,
+ initial = initial}, loc)),
+ ((PCon (Enum,
+ PConFfi {mod = "Basis",
+ datatyp = "bool",
+ con = "True",
+ arg = NONE},
+ NONE), loc),
+ (EQuery {exps = exps,
+ tables = tables,
+ state = state,
+ query = query,
+ body = wrapped_body_addtocache,
+ initial = initial}, loc))],
+ {disc = (TFfi ("Basis", "bool"), loc),
+ result = state})
+ end
+ else
+ e
+ | _ => e
+ in
+ case Settings.getFileCache () of
+ NONE => file
+ | SOME _ => MonoUtil.File.map {typ = fn t => t,
+ exp = exp,
+ decl = fn d => d} file
+ end
+
+end
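To make the instrumentation concrete: when a query result contains a blob column and a file cache is configured, FileCache.instrument wraps the original SQL so that only a SHA512 digest crosses the database connection. A rough sketch with illustrative table and column names (the compiler actually emits its mangled, uw_-prefixed identifiers):

    -- Query as written, returning the blob itself:
    SELECT t.Blob, t.MimeType FROM t WHERE t.Id = 42

    -- Hash-only variant generated when the file cache is enabled:
    SELECT DIGEST(Blob, 'sha512'), MimeType
    FROM (SELECT t.Blob, t.MimeType FROM t WHERE t.Id = 42) AS Wrap

At runtime, each returned digest is passed to uw_Basis_check_filecache; a cache miss sets file_cache_missed and aborts with UNLIMITED_RETRY, and on the retry uw_Basis_filecache_missed() returns true, so the untransformed query runs and its blobs are written back into the cache by uw_Basis_cache_file.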
diff --git a/src/mono_util.sml b/src/mono_util.sml
index fc1a2bcb..fdf48d20 100644
--- a/src/mono_util.sml
+++ b/src/mono_util.sml
@@ -107,16 +107,16 @@ fun mapfold fc =
| TOption t =>
S.map2 (mft t,
fn t' =>
- (TOption t, loc))
+ (TOption t', loc))
| TList t =>
S.map2 (mft t,
fn t' =>
- (TList t, loc))
+ (TList t', loc))
| TSource => S.return2 cAll
| TSignal t =>
S.map2 (mft t,
fn t' =>
- (TSignal t, loc))
+ (TSignal t', loc))
in
mft
end
diff --git a/src/mysql.sml b/src/mysql.sml
index 52e4921e..e7cad84e 100644
--- a/src/mysql.sml
+++ b/src/mysql.sml
@@ -1609,6 +1609,7 @@ val () = addDbms {name = "mysql",
onlyUnion = true,
nestedRelops = false,
windowFunctions = false,
- supportsIsDistinctFrom = true}
+ supportsIsDistinctFrom = true,
+ supportsSHA512 = false}
end
diff --git a/src/postgres.sml b/src/postgres.sml
index fac913f0..2b6bee8c 100644
--- a/src/postgres.sml
+++ b/src/postgres.sml
@@ -1153,7 +1153,8 @@ val () = addDbms {name = "postgres",
onlyUnion = false,
nestedRelops = true,
windowFunctions = true,
- supportsIsDistinctFrom = true}
+ supportsIsDistinctFrom = true,
+ supportsSHA512 = true}
val () = setDbms "postgres"
diff --git a/src/settings.sig b/src/settings.sig
index 729218ac..986d6ed7 100644
--- a/src/settings.sig
+++ b/src/settings.sig
@@ -219,7 +219,8 @@ signature SETTINGS = sig
onlyUnion : bool,
nestedRelops : bool,
windowFunctions : bool,
- supportsIsDistinctFrom : bool
+ supportsIsDistinctFrom : bool,
+ supportsSHA512 : bool
}
val addDbms : dbms -> unit
@@ -253,6 +254,9 @@ signature SETTINGS = sig
val setSigFile : string option -> unit
val getSigFile : unit -> string option
+ val setFileCache : string option -> unit
+ val getFileCache : unit -> string option
+
(* Which GET-able functions should be allowed to have side effects? *)
val setSafeGets : string list -> unit
val isSafeGet : string -> bool
diff --git a/src/settings.sml b/src/settings.sml
index 9e6d3e76..cfbe98a5 100644
--- a/src/settings.sml
+++ b/src/settings.sml
@@ -646,7 +646,8 @@ type dbms = {
onlyUnion : bool,
nestedRelops : bool,
windowFunctions: bool,
- supportsIsDistinctFrom : bool
+ supportsIsDistinctFrom : bool,
+ supportsSHA512 : bool
}
val dbmses = ref ([] : dbms list)
@@ -679,7 +680,8 @@ val curDb = ref ({name = "",
onlyUnion = false,
nestedRelops = false,
windowFunctions = false,
- supportsIsDistinctFrom = false} : dbms)
+ supportsIsDistinctFrom = false,
+ supportsSHA512 = false} : dbms)
fun addDbms v = dbmses := v :: !dbmses
fun setDbms s =
@@ -724,6 +726,15 @@ val sigFile = ref (NONE : string option)
fun setSigFile v = sigFile := v
fun getSigFile () = !sigFile
+val fileCache = ref (NONE : string option)
+fun setFileCache v =
+ (if Option.isSome v andalso not (#supportsSHA512 (currentDbms ())) then
+ ErrorMsg.error "The selected database engine is incompatible with file caching."
+ else
+ ();
+ fileCache := v)
+fun getFileCache () = !fileCache
+
structure SS = BinarySetFn(struct
type ord_key = string
val compare = String.compare
diff --git a/src/sources b/src/sources
index 52b1bdd7..5c0b2a84 100644
--- a/src/sources
+++ b/src/sources
@@ -231,6 +231,9 @@ $(SRC)/sidecheck.sml
$(SRC)/sigcheck.sig
$(SRC)/sigcheck.sml
+$(SRC)/filecache.sig
+$(SRC)/filecache.sml
+
$(SRC)/mono_inline.sml
$(SRC)/sha1.sig
diff --git a/src/sqlite.sml b/src/sqlite.sml
index 0acd866b..db7052d1 100644
--- a/src/sqlite.sml
+++ b/src/sqlite.sml
@@ -855,6 +855,7 @@ val () = addDbms {name = "sqlite",
onlyUnion = false,
nestedRelops = false,
windowFunctions = false,
- supportsIsDistinctFrom = false}
+ supportsIsDistinctFrom = false,
+ supportsSHA512 = false}
end
diff --git a/tests/dbupload.urp b/tests/dbupload.urp
index dd8417d1..daa68e2c 100644
--- a/tests/dbupload.urp
+++ b/tests/dbupload.urp
@@ -2,5 +2,6 @@ database dbname=dbupload
sql dbupload.sql
allow mime *
rewrite all Dbupload/*
+filecache /tmp/files
dbupload
diff --git a/tests/dbuploadOpt.ur b/tests/dbuploadOpt.ur
new file mode 100644
index 00000000..466b49f3
--- /dev/null
+++ b/tests/dbuploadOpt.ur
@@ -0,0 +1,27 @@
+table t : { Id : int, Blob : option blob, MimeType : string }
+sequence s
+
+fun getImage id : transaction page =
+ r <- oneRow1 (SELECT t.Blob, t.MimeType
+ FROM t
+ WHERE t.Id = {[id]});
+ case r.Blob of
+ None => error <xml>Oh no!</xml>
+ | Some blob => returnBlob blob (blessMime r.MimeType)
+
+fun main () : transaction page =
+ let
+ fun handle r =
+ id <- nextval s;
+ dml (INSERT INTO t (Id, Blob, MimeType)
+ VALUES ({[id]}, {[if fileMimeType r.File = "image/jpeg" then Some (fileData r.File) else None]}, {[fileMimeType r.File]}));
+ main ()
+ in
+ x <- queryX1 (SELECT t.Id FROM t)
+ (fn r => <xml><img src={url (getImage r.Id)}/><br/></xml>);
+ return <xml><body>
+ <form><upload{#File}/> <submit action={handle}/></form>
+ <hr/>
+ {x}
+ </body></xml>
+ end
diff --git a/tests/dbuploadOpt.urp b/tests/dbuploadOpt.urp
new file mode 100644
index 00000000..816bcea1
--- /dev/null
+++ b/tests/dbuploadOpt.urp
@@ -0,0 +1,7 @@
+database dbname=dbuploadOpt
+sql dbuploadOpt.sql
+allow mime *
+rewrite all DbuploadOpt/*
+filecache /tmp/files
+
+dbuploadOpt