From 61bd40e1af8c3f7ace2a09068557ac7c05662b69 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 27 Nov 2008 14:38:53 -0500
Subject: Start of manual
---
doc/Makefile | 23 ++++++++++++++++
doc/manual.tex | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 108 insertions(+)
create mode 100644 doc/Makefile
create mode 100644 doc/manual.tex
(limited to 'doc')
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..777c5bf7
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,23 @@
+PAPERS=manual
+
+FIGURES=
+
+all: $(PAPERS:%=%.dvi) $(PAPERS:%=%.ps) $(PAPERS:%=%.pdf)
+
+%.dvi: %.tex $(FIGURES:%=%.eps)
+ latex $<
+ latex $<
+
+%.ps: %.dvi
+ dvips $< -o $@
+
+%.pdf: %.dvi $(FIGURES:%=%.pdf)
+ pdflatex $(<:%.dvi=%)
+
+%.pdf: %.eps
+ epstopdf $<
+
+clean:
+ rm -f *.aux *.bbl *.blg *.dvi *.log *.pdf *.ps
+
+.PHONY: all clean
diff --git a/doc/manual.tex b/doc/manual.tex
new file mode 100644
index 00000000..8517206a
--- /dev/null
+++ b/doc/manual.tex
@@ -0,0 +1,85 @@
+\documentclass{article}
+\usepackage{fullpage,amsmath,amssymb,proof}
+
+\newcommand{\cd}[1]{\texttt{#1}}
+\newcommand{\mt}[1]{\mathsf{#1}}
+
+\newcommand{\rc}{+ \hspace{-.075in} + \;}
+
+\begin{document}
+
+\title{The Ur/Web Manual}
+\author{Adam Chlipala}
+
+\maketitle
+
+\section{Syntax}
+
+\subsection{Lexical Conventions}
+
+We give the Ur language definition in \LaTeX $\;$ math mode, since that is prettier than monospaced ASCII. The corresponding ASCII syntax can be read off directly. Here is the key for mapping math symbols to ASCII character sequences.
+
+\begin{center}
+ \begin{tabular}{rl}
+ \textbf{\LaTeX} & \textbf{ASCII} \\
+ $\to$ & \cd{->} \\
+ $\times$ & \cd{*} \\
+ $\lambda$ & \cd{fn} \\
+ $\Rightarrow$ & \cd{=>} \\
+ $\rc$ & \cd{++} \\
+ \\
+ $x$ & Normal textual identifier, not beginning with an uppercase letter \\
+ $\alpha$ & Normal textual identifier, not beginning with an uppercase letter \\
+ $f$ & Normal textual identifier, beginning with an uppercase letter \\
+ \end{tabular}
+\end{center}
+
+We often write syntax like $N, \cdots, N$ to stand for the non-terminal $N$ repeated 0 or more times. That is, the $\cdots$ symbol is not translated literally to ASCII.
+
+\subsection{Core Syntax}
+
+\emph{Kinds} classify types and other compile-time-only entities. Each kind in the grammar is listed with a description of the sort of data it classifies.
+$$\begin{array}{rrcll}
+ \textrm{Kinds} & \kappa &::=& \mt{Type} & \textrm{proper types} \\
+ &&& \mid \mt{Unit} & \textrm{the trivial constructor} \\
+ &&& \mid \mt{Name} & \textrm{field names} \\
+ &&& \mid \kappa \to \kappa & \textrm{type-level functions} \\
+ &&& \mid \{\kappa\} & \textrm{type-level records} \\
+ &&& \mid (\kappa \times \cdots \times \kappa) & \textrm{type-level tuples} \\
+ &&& \mid (\kappa) & \textrm{explicit precedence} \\
+\end{array}$$
+
+Ur supports several different notions of functions that take types as arguments. These arguments can be either implicit, causing them to be inferred at use sites; or explicit, forcing them to be specified manually at use sites. There is a common explicitness annotation convention applied at the definitions of and in the types of such functions.
+$$\begin{array}{rrcll}
+ \textrm{Explicitness} & ? &::=& :: & \textrm{explicit} \\
+ &&& \mid \; ::: & \textrm{implicit}
+\end{array}$$
+
+\emph{Constructors} are the main class of compile-time-only data. They include proper types and are classified by kinds.
+$$\begin{array}{rrcll}
+ \textrm{Constructors} & c, \tau &::=& (c) :: \kappa & \textrm{kind annotation} \\
+ &&& \mid \alpha & \textrm{constructor variable} \\
+ \\
+ &&& \mid \tau \to \tau & \textrm{function type} \\
+ &&& \mid \alpha \; ? \; \kappa \to \tau & \textrm{polymorphic function type} \\
+ &&& \mid \$ c & \textrm{record type} \\
+ \\
+ &&& \mid c \; c & \textrm{type-level function application} \\
+ &&& \mid \lambda \alpha \; ? \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
+ \\
+ &&& \mid () & \textrm{type-level unit} \\
+ &&& \mid \#f & \textrm{field name} \\
+ \\
+ &&& \mid [c = c, \cdots, c = c] & \textrm{known-length type-level record} \\
+ &&& \mid c \rc c & \textrm{type-level record concatenation} \\
+ &&& \mid \mt{fold} & \textrm{type-level record fold} \\
+ \\
+ &&& \mid (c, \cdots, c) & \textrm{type-level tuple} \\
+ &&& \mid c.n & \textrm{type-level tuple projection ($n \in \mathbb N^+$)} \\
+ \\
+ &&& \mid \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
+ \\
+ &&& \mid (c) & \textrm{explicit precedence} \\
+\end{array}$$
+
+\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 6b14029cca03a763f05baf08ce362d8a250b4288 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 27 Nov 2008 14:57:47 -0500
Subject: Signatures
---
doc/manual.tex | 73 ++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 48 insertions(+), 25 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 8517206a..e83dc392 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -29,57 +29,80 @@ We give the Ur language definition in \LaTeX $\;$ math mode, since that is prett
$\rc$ & \cd{++} \\
\\
$x$ & Normal textual identifier, not beginning with an uppercase letter \\
- $\alpha$ & Normal textual identifier, not beginning with an uppercase letter \\
- $f$ & Normal textual identifier, beginning with an uppercase letter \\
+ $X$ & Normal textual identifier, beginning with an uppercase letter \\
\end{tabular}
\end{center}
-We often write syntax like $N, \cdots, N$ to stand for the non-terminal $N$ repeated 0 or more times. That is, the $\cdots$ symbol is not translated literally to ASCII.
+We often write syntax like $e^*$ to indicate zero or more copies of $e$, $e^+$ to indicate one or more copies, and $e,^*$ and $e,^+$ to indicate multiple copies separated by commas. Another separator may be used in place of a comma. The $e$ term may be surrounded by parentheses to indicate grouping; those parentheses should not be included in the actual ASCII.
\subsection{Core Syntax}
\emph{Kinds} classify types and other compile-time-only entities. Each kind in the grammar is listed with a description of the sort of data it classifies.
$$\begin{array}{rrcll}
\textrm{Kinds} & \kappa &::=& \mt{Type} & \textrm{proper types} \\
- &&& \mid \mt{Unit} & \textrm{the trivial constructor} \\
- &&& \mid \mt{Name} & \textrm{field names} \\
- &&& \mid \kappa \to \kappa & \textrm{type-level functions} \\
- &&& \mid \{\kappa\} & \textrm{type-level records} \\
- &&& \mid (\kappa \times \cdots \times \kappa) & \textrm{type-level tuples} \\
- &&& \mid (\kappa) & \textrm{explicit precedence} \\
+ &&& \mt{Unit} & \textrm{the trivial constructor} \\
+ &&& \mt{Name} & \textrm{field names} \\
+ &&& \kappa \to \kappa & \textrm{type-level functions} \\
+ &&& \{\kappa\} & \textrm{type-level records} \\
+ &&& (\kappa\times^+) & \textrm{type-level tuples} \\
+ &&& (\kappa) & \textrm{explicit precedence} \\
\end{array}$$
Ur supports several different notions of functions that take types as arguments. These arguments can be either implicit, causing them to be inferred at use sites; or explicit, forcing them to be specified manually at use sites. There is a common explicitness annotation convention applied at the definitions of and in the types of such functions.
$$\begin{array}{rrcll}
\textrm{Explicitness} & ? &::=& :: & \textrm{explicit} \\
- &&& \mid \; ::: & \textrm{implicit}
+ &&& \; ::: & \textrm{implicit}
\end{array}$$
\emph{Constructors} are the main class of compile-time-only data. They include proper types and are classified by kinds.
$$\begin{array}{rrcll}
\textrm{Constructors} & c, \tau &::=& (c) :: \kappa & \textrm{kind annotation} \\
- &&& \mid \alpha & \textrm{constructor variable} \\
+ &&& x & \textrm{constructor variable} \\
\\
- &&& \mid \tau \to \tau & \textrm{function type} \\
- &&& \mid \alpha \; ? \; \kappa \to \tau & \textrm{polymorphic function type} \\
- &&& \mid \$ c & \textrm{record type} \\
+ &&& \tau \to \tau & \textrm{function type} \\
+ &&& x \; ? \; \kappa \to \tau & \textrm{polymorphic function type} \\
+ &&& \$ c & \textrm{record type} \\
\\
- &&& \mid c \; c & \textrm{type-level function application} \\
- &&& \mid \lambda \alpha \; ? \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
+ &&& c \; c & \textrm{type-level function application} \\
+ &&& \lambda x \; ? \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
\\
- &&& \mid () & \textrm{type-level unit} \\
- &&& \mid \#f & \textrm{field name} \\
+ &&& () & \textrm{type-level unit} \\
+ &&& \#X & \textrm{field name} \\
\\
- &&& \mid [c = c, \cdots, c = c] & \textrm{known-length type-level record} \\
- &&& \mid c \rc c & \textrm{type-level record concatenation} \\
- &&& \mid \mt{fold} & \textrm{type-level record fold} \\
+ &&& [(c = c)^*] & \textrm{known-length type-level record} \\
+ &&& c \rc c & \textrm{type-level record concatenation} \\
+ &&& \mt{fold} & \textrm{type-level record fold} \\
\\
- &&& \mid (c, \cdots, c) & \textrm{type-level tuple} \\
- &&& \mid c.n & \textrm{type-level tuple projection ($n \in \mathbb N^+$)} \\
+ &&& (c^+) & \textrm{type-level tuple} \\
+ &&& c.n & \textrm{type-level tuple projection ($n \in \mathbb N^+$)} \\
\\
- &&& \mid \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
+ &&& \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
\\
- &&& \mid (c) & \textrm{explicit precedence} \\
+ &&& (c) & \textrm{explicit precedence} \\
+\end{array}$$
+
+Modules of the module system are described by \emph{signatures}.
+$$\begin{array}{rrcll}
+ \textrm{Signatures} & S &::=& \mt{sig} \; s^* \; \mt{end} & \textrm{constant} \\
+ &&& X & \textrm{variable} \\
+ &&& \mt{functor}(X : S) : S & \textrm{functor} \\
+ &&& S \; \mt{where} \; x = c & \textrm{concretizing an abstract constructor} \\
+ &&& M.X & \textrm{projection from a module} \\
+ \\
+ \textrm{Signature items} & s &::=& \mt{con} \; x :: \kappa & \textrm{abstract constructor} \\
+ &&& \mt{con} \; x :: \kappa = c & \textrm{concrete constructor} \\
+ &&& \mt{datatype} \; x \; x^* = dc\mid^+ & \textrm{algebraic datatype declaration} \\
+ &&& \mt{datatype} \; x = M.x & \textrm{algebraic datatype import} \\
+ &&& \mt{val} \; x : \tau & \textrm{value} \\
+ &&& \mt{structure} \; X : S & \textrm{sub-module} \\
+ &&& \mt{signature} \; X = S & \textrm{sub-signature} \\
+ &&& \mt{include} \; S & \textrm{signature inclusion} \\
+ &&& \mt{constraint} \; c \sim c & \textrm{record disjointness constraint} \\
+ &&& \mt{class} \; x & \textrm{abstract type class} \\
+ &&& \mt{class} \; x = c & \textrm{concrete type class} \\
+ \\
+ \textrm{Datatype constructors} & dc &::=& X & \textrm{nullary constructor} \\
+ &&& X \; \mt{of} \; \tau & \textrm{unary constructor} \\
\end{array}$$
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From a9c2432822c68cfc0897c162b17af6b69d0e22b7 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 27 Nov 2008 15:06:29 -0500
Subject: Patterns
---
doc/manual.tex | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index e83dc392..01f5a5f3 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -35,6 +35,8 @@ We give the Ur language definition in \LaTeX $\;$ math mode, since that is prett
We often write syntax like $e^*$ to indicate zero or more copies of $e$, $e^+$ to indicate one or more copies, and $e,^*$ and $e,^+$ to indicate multiple copies separated by commas. Another separator may be used in place of a comma. The $e$ term may be surrounded by parentheses to indicate grouping; those parentheses should not be included in the actual ASCII.
+We write $\ell$ for literals of the primitive types, for the most part following C conventions. There are $\mt{int}$, $\mt{float}$, and $\mt{string}$ literals.
+
\subsection{Core Syntax}
\emph{Kinds} classify types and other compile-time-only entities. Each kind in the grammar is listed with a description of the sort of data it classifies.
@@ -105,4 +107,18 @@ $$\begin{array}{rrcll}
&&& X \; \mt{of} \; \tau & \textrm{unary constructor} \\
\end{array}$$
+\emph{Patterns} are used to describe structural conditions on expressions, such that expressions may be tested against patterns, generating assignments to pattern variables if successful.
+$$\begin{array}{rrcll}
+ \textrm{Patterns} & p &::=& \_ & \textrm{wildcard} \\
+ &&& x & \textrm{variable} \\
+ &&& \ell & \textrm{constant} \\
+ &&& \hat{X} & \textrm{nullary constructor} \\
+ &&& \hat{X} \; p & \textrm{unary constructor} \\
+ &&& \{(x = p,)^*\} & \textrm{rigid record pattern} \\
+ &&& \{(x = p,)^+, \ldots\} & \textrm{flexible record pattern} \\
+ \\
+ \textrm{Qualified capitalized variable} & \hat{X} &::=& X & \textrm{not from a module} \\
+ &&& M.X & \textrm{projection from a module} \\
+\end{array}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 5e3c42711e20b42ba7f850cc5800f01cbfee3f05 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 27 Nov 2008 15:27:17 -0500
Subject: Expressions
---
doc/manual.tex | 41 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 01f5a5f3..18879a50 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -5,6 +5,8 @@
\newcommand{\mt}[1]{\mathsf{#1}}
\newcommand{\rc}{+ \hspace{-.075in} + \;}
+\newcommand{\rcut}{\; \texttt{--} \;}
+\newcommand{\rcutM}{\; \texttt{---} \;}
\begin{document}
@@ -26,7 +28,7 @@ We give the Ur language definition in \LaTeX $\;$ math mode, since that is prett
$\times$ & \cd{*} \\
$\lambda$ & \cd{fn} \\
$\Rightarrow$ & \cd{=>} \\
- $\rc$ & \cd{++} \\
+ & \cd{---} \\
\\
$x$ & Normal textual identifier, not beginning with an uppercase letter \\
$X$ & Normal textual identifier, beginning with an uppercase letter \\
@@ -37,6 +39,8 @@ We often write syntax like $e^*$ to indicate zero or more copies of $e$, $e^+$ t
We write $\ell$ for literals of the primitive types, for the most part following C conventions. There are $\mt{int}$, $\mt{float}$, and $\mt{string}$ literals.
+This version of the manual doesn't include operator precedences; see \texttt{src/urweb.grm} for that.
+
\subsection{Core Syntax}
\emph{Kinds} classify types and other compile-time-only entities. Each kind in the grammar is listed with a description of the sort of data it classifies.
@@ -47,6 +51,7 @@ $$\begin{array}{rrcll}
&&& \kappa \to \kappa & \textrm{type-level functions} \\
&&& \{\kappa\} & \textrm{type-level records} \\
&&& (\kappa\times^+) & \textrm{type-level tuples} \\
+ &&& \_ & \textrm{wildcard} \\
&&& (\kappa) & \textrm{explicit precedence} \\
\end{array}$$
@@ -80,6 +85,7 @@ $$\begin{array}{rrcll}
\\
&&& \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
\\
+ &&& \_ & \textrm{wildcard} \\
&&& (c) & \textrm{explicit precedence} \\
\end{array}$$
@@ -116,9 +122,42 @@ $$\begin{array}{rrcll}
&&& \hat{X} \; p & \textrm{unary constructor} \\
&&& \{(x = p,)^*\} & \textrm{rigid record pattern} \\
&&& \{(x = p,)^+, \ldots\} & \textrm{flexible record pattern} \\
+ &&& (p) & \textrm{explicit precedence} \\
\\
\textrm{Qualified capitalized variable} & \hat{X} &::=& X & \textrm{not from a module} \\
&&& M.X & \textrm{projection from a module} \\
\end{array}$$
+\emph{Expressions} are the main run-time entities, corresponding to both ``expressions'' and ``statements'' in mainstream imperative languages.
+$$\begin{array}{rrcll}
+ \textrm{Expressions} & e &::=& e : \tau & \textrm{type annotation} \\
+ &&& x & \textrm{variable} \\
+ &&& \ell & \textrm{constant} \\
+ \\
+ &&& e \; e & \textrm{function application} \\
+ &&& \lambda x : \tau \Rightarrow e & \textrm{function abstraction} \\
+ &&& e [c] & \textrm{polymorphic function application} \\
+ &&& \lambda x \; ? \; \kappa \Rightarrow e & \textrm{polymorphic function abstraction} \\
+ \\
+ &&& \{(c = e,)^*\} & \textrm{known-length record} \\
+ &&& e.c & \textrm{record field projection} \\
+ &&& e \rc e & \textrm{record concatenation} \\
+ &&& e \rcut c & \textrm{removal of a single record field} \\
+ &&& e \rcutM c & \textrm{removal of multiple record fields} \\
+ &&& \mt{fold} & \textrm{fold over fields of a type-level record} \\
+ \\
+ &&& \mt{let} \; ed^* \; \mt{in} \; e \; \mt{end} & \textrm{local definitions} \\
+ \\
+ &&& \mt{case} \; e \; \mt{of} \; (p \Rightarrow e|)^+ & \textrm{pattern matching} \\
+ \\
+ &&& \lambda [c \sim c] \Rightarrow e & \textrm{guarded expression} \\
+ \\
+ &&& \_ & \textrm{wildcard} \\
+ &&& (e) & \textrm{explicit precedence} \\
+ \\
+ \textrm{Local declarations} & ed &::=& \cd{val} \; x : \tau = e & \textrm{non-recursive value} \\
+ &&& \cd{val} \; \cd{rec} \; (x : \tau = e \; \cd{and})^+ & \textrm{mutually-recursive values} \\
+\end{array}$$
+
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 5f87548c461b829071799d897bd10e5cd4a557a4 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 27 Nov 2008 15:43:10 -0500
Subject: Declarations and modules
---
doc/manual.tex | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 18879a50..b1042fdb 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -99,7 +99,7 @@ $$\begin{array}{rrcll}
\\
\textrm{Signature items} & s &::=& \mt{con} \; x :: \kappa & \textrm{abstract constructor} \\
&&& \mt{con} \; x :: \kappa = c & \textrm{concrete constructor} \\
- &&& \mt{datatype} \; x \; x^* = dc\mid^+ & \textrm{algebraic datatype declaration} \\
+ &&& \mt{datatype} \; x \; x^* = dc\mid^+ & \textrm{algebraic datatype definition} \\
&&& \mt{datatype} \; x = M.x & \textrm{algebraic datatype import} \\
&&& \mt{val} \; x : \tau & \textrm{value} \\
&&& \mt{structure} \; X : S & \textrm{sub-module} \\
@@ -159,5 +159,30 @@ $$\begin{array}{rrcll}
&&& \cd{val} \; \cd{rec} \; (x : \tau = e \; \cd{and})^+ & \textrm{mutually-recursive values} \\
\end{array}$$
+\emph{Declarations} primarily bring new symbols into context.
+$$\begin{array}{rrcll}
+ \textrm{Declarations} & d &::=& \mt{con} \; x :: \kappa = c & \textrm{constructor synonym} \\
+ &&& \mt{datatype} \; x \; x^* = dc\mid^+ & \textrm{algebraic datatype definition} \\
+ &&& \mt{datatype} \; x = M.x & \textrm{algebraic datatype import} \\
+ &&& \mt{val} \; x : \tau = e & \textrm{value} \\
+ &&& \mt{val} \; \cd{rec} \; (x : \tau = e \; \mt{and})^+ & \textrm{mutually-recursive values} \\
+ &&& \mt{structure} \; X : S = M & \textrm{module definition} \\
+ &&& \mt{signature} \; X = S & \textrm{signature definition} \\
+ &&& \mt{open} \; M & \textrm{module inclusion} \\
+ &&& \mt{constraint} \; c \sim c & \textrm{record disjointness constraint} \\
+ &&& \mt{open} \; \mt{constraints} \; M & \textrm{inclusion of just the constraints from a module} \\
+ &&& \mt{table} \; x : c & \textrm{SQL table} \\
+ &&& \mt{sequence} \; x & \textrm{SQL sequence} \\
+ &&& \mt{class} \; x = c & \textrm{concrete type class} \\
+ &&& \mt{cookie} \; x : c & \textrm{HTTP cookie} \\
+ \\
+ \textrm{Modules} & M &::=& \mt{struct} \; d^* \; \mt{end} & \mt{constant} \\
+ &&& X & \mt{variable} \\
+ &&& M.X & \mt{projection} \\
+ &&& M(M) & \mt{functor application} \\
+ &&& \mt{functor}(X : S) : S = M & \mt{functor abstraction} \\
+\end{array}$$
+
+There are two kinds of Ur files. A file named $M\texttt{.ur}$ is an \emph{implementation file}, and it should contain a sequence of declarations $d^*$. A file named $M\texttt{.urs}$ is an \emph{interface file}; it must always have a matching $M\texttt{.ur}$ and should contain a sequence of signature items $s^*$. When both files are present, the overall effect is the same as a monolithic declaration $\mt{structure} \; M : \mt{sig} \; s^* \; \mt{end} = \mt{struct} \; d^* \; \mt{end}$. When no interface file is included, the overall effect is similar, with a signature for module $M$ being inferred rather than just checked against an interface.
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From e5d50c25383c90543455c6977270c3a675f888d4 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 27 Nov 2008 16:55:30 -0500
Subject: Shorthands
---
doc/manual.tex | 74 ++++++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 59 insertions(+), 15 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index b1042fdb..9a2f4173 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -15,7 +15,9 @@
\maketitle
-\section{Syntax}
+\section{Ur Syntax}
+
+In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for tables, sequences, and cookies.
\subsection{Lexical Conventions}
@@ -28,7 +30,9 @@ We give the Ur language definition in \LaTeX $\;$ math mode, since that is prett
$\times$ & \cd{*} \\
$\lambda$ & \cd{fn} \\
$\Rightarrow$ & \cd{=>} \\
- & \cd{---} \\
+ $\neq$ & \cd{<>} \\
+ $\leq$ & \cd{<=} \\
+ $\geq$ & \cd{>=} \\
\\
$x$ & Normal textual identifier, not beginning with an uppercase letter \\
$X$ & Normal textual identifier, beginning with an uppercase letter \\
@@ -51,7 +55,7 @@ $$\begin{array}{rrcll}
&&& \kappa \to \kappa & \textrm{type-level functions} \\
&&& \{\kappa\} & \textrm{type-level records} \\
&&& (\kappa\times^+) & \textrm{type-level tuples} \\
- &&& \_ & \textrm{wildcard} \\
+ &&& \_\_ & \textrm{wildcard} \\
&&& (\kappa) & \textrm{explicit precedence} \\
\end{array}$$
@@ -85,7 +89,7 @@ $$\begin{array}{rrcll}
\\
&&& \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
\\
- &&& \_ & \textrm{wildcard} \\
+ &&& \_ :: \kappa & \textrm{wildcard} \\
&&& (c) & \textrm{explicit precedence} \\
\end{array}$$
@@ -94,13 +98,13 @@ $$\begin{array}{rrcll}
\textrm{Signatures} & S &::=& \mt{sig} \; s^* \; \mt{end} & \textrm{constant} \\
&&& X & \textrm{variable} \\
&&& \mt{functor}(X : S) : S & \textrm{functor} \\
- &&& S \; \mt{where} \; x = c & \textrm{concretizing an abstract constructor} \\
+ &&& S \; \mt{where} \; \mt{con} \; x = c & \textrm{concretizing an abstract constructor} \\
&&& M.X & \textrm{projection from a module} \\
\\
\textrm{Signature items} & s &::=& \mt{con} \; x :: \kappa & \textrm{abstract constructor} \\
&&& \mt{con} \; x :: \kappa = c & \textrm{concrete constructor} \\
&&& \mt{datatype} \; x \; x^* = dc\mid^+ & \textrm{algebraic datatype definition} \\
- &&& \mt{datatype} \; x = M.x & \textrm{algebraic datatype import} \\
+ &&& \mt{datatype} \; x = \mt{datatype} \; M.x & \textrm{algebraic datatype import} \\
&&& \mt{val} \; x : \tau & \textrm{value} \\
&&& \mt{structure} \; X : S & \textrm{sub-module} \\
&&& \mt{signature} \; X = S & \textrm{sub-signature} \\
@@ -124,14 +128,15 @@ $$\begin{array}{rrcll}
&&& \{(x = p,)^+, \ldots\} & \textrm{flexible record pattern} \\
&&& (p) & \textrm{explicit precedence} \\
\\
- \textrm{Qualified capitalized variable} & \hat{X} &::=& X & \textrm{not from a module} \\
+ \textrm{Qualified capitalized variables} & \hat{X} &::=& X & \textrm{not from a module} \\
&&& M.X & \textrm{projection from a module} \\
\end{array}$$
\emph{Expressions} are the main run-time entities, corresponding to both ``expressions'' and ``statements'' in mainstream imperative languages.
$$\begin{array}{rrcll}
\textrm{Expressions} & e &::=& e : \tau & \textrm{type annotation} \\
- &&& x & \textrm{variable} \\
+ &&& \hat{x} & \textrm{variable} \\
+ &&& \hat{X} & \textrm{datatype constructor} \\
&&& \ell & \textrm{constant} \\
\\
&&& e \; e & \textrm{function application} \\
@@ -157,13 +162,16 @@ $$\begin{array}{rrcll}
\\
\textrm{Local declarations} & ed &::=& \cd{val} \; x : \tau = e & \textrm{non-recursive value} \\
&&& \cd{val} \; \cd{rec} \; (x : \tau = e \; \cd{and})^+ & \textrm{mutually-recursive values} \\
+ \\
+ \textrm{Qualified uncapitalized variables} & \hat{x} &::=& x & \textrm{not from a module} \\
+ &&& M.x & \textrm{projection from a module} \\
\end{array}$$
\emph{Declarations} primarily bring new symbols into context.
$$\begin{array}{rrcll}
\textrm{Declarations} & d &::=& \mt{con} \; x :: \kappa = c & \textrm{constructor synonym} \\
&&& \mt{datatype} \; x \; x^* = dc\mid^+ & \textrm{algebraic datatype definition} \\
- &&& \mt{datatype} \; x = M.x & \textrm{algebraic datatype import} \\
+ &&& \mt{datatype} \; x = \mt{datatype} \; M.x & \textrm{algebraic datatype import} \\
&&& \mt{val} \; x : \tau = e & \textrm{value} \\
&&& \mt{val} \; \cd{rec} \; (x : \tau = e \; \mt{and})^+ & \textrm{mutually-recursive values} \\
&&& \mt{structure} \; X : S = M & \textrm{module definition} \\
@@ -174,15 +182,51 @@ $$\begin{array}{rrcll}
&&& \mt{table} \; x : c & \textrm{SQL table} \\
&&& \mt{sequence} \; x & \textrm{SQL sequence} \\
&&& \mt{class} \; x = c & \textrm{concrete type class} \\
- &&& \mt{cookie} \; x : c & \textrm{HTTP cookie} \\
+ &&& \mt{cookie} \; x : \tau & \textrm{HTTP cookie} \\
\\
- \textrm{Modules} & M &::=& \mt{struct} \; d^* \; \mt{end} & \mt{constant} \\
- &&& X & \mt{variable} \\
- &&& M.X & \mt{projection} \\
- &&& M(M) & \mt{functor application} \\
- &&& \mt{functor}(X : S) : S = M & \mt{functor abstraction} \\
+ \textrm{Modules} & M &::=& \mt{struct} \; d^* \; \mt{end} & \textrm{constant} \\
+ &&& X & \textrm{variable} \\
+ &&& M.X & \textrm{projection} \\
+ &&& M(M) & \textrm{functor application} \\
+ &&& \mt{functor}(X : S) : S = M & \textrm{functor abstraction} \\
\end{array}$$
There are two kinds of Ur files. A file named $M\texttt{.ur}$ is an \emph{implementation file}, and it should contain a sequence of declarations $d^*$. A file named $M\texttt{.urs}$ is an \emph{interface file}; it must always have a matching $M\texttt{.ur}$ and should contain a sequence of signature items $s^*$. When both files are present, the overall effect is the same as a monolithic declaration $\mt{structure} \; M : \mt{sig} \; s^* \; \mt{end} = \mt{struct} \; d^* \; \mt{end}$. When no interface file is included, the overall effect is similar, with a signature for module $M$ being inferred rather than just checked against an interface.
+\subsection{Shorthands}
+
+There are a variety of derived syntactic forms that elaborate into the core syntax from the last subsection. We will present the additional forms roughly following the order in which we presented the constructs that they elaborate into.
+
+In many contexts where record fields are expected, like in a projection $e.c$, a constant field may be written as simply $X$, rather than $\#X$.
+
+A record type may be written $\{(c = c,)^*\}$, which elaborates to $\$[(c = c,)^*]$.
+
+A tuple type $(\tau_1, \ldots, \tau_n)$ expands to a record type $\{1 = \tau_1, \ldots, n = \tau_n\}$, with natural numbers as field names. A tuple pattern $(p_1, \ldots, p_n)$ expands to a rigid record pattern $\{1 = p_1, \ldots, n = p_n\}$. Positive natural numbers may be used in most places where field names would be allowed.
+
+In general, several adjacent $\lambda$ forms may be combined into one, and kind and type annotations may be omitted, in which case they are implicitly included as wildcards. More formally, for constructor-level abstractions, we can define a new non-terminal $b ::= x \mid (x :: \kappa) \mid [c \sim c]$ and allow composite abstractions of the form $\lambda b^+ \Rightarrow c$, elaborating into the obvious sequence of one core $\lambda$ per element of $b^+$.
+
+For any signature item or declaration that defines some entity to be equal to $A$ with classification annotation $B$ (e.g., $\mt{val} \; x : B = A$), $B$ and the preceding colon (or similar punctuation) may be omitted, in which case it is filled in as a wildcard.
+
+A signature item or declaration $\mt{type} \; x$ or $\mt{type} \; x = \tau$ is elaborated into $\mt{con} \; x :: \mt{Type}$ or $\mt{con} \; x :: \mt{Type} = \tau$, respectively.
+
+A signature item or declaration $\mt{class} \; x = \lambda y :: \mt{Type} \Rightarrow c$ may be abbreviated $\mt{class} \; x \; y = c$.
+
+Handling of implicit and explicit constructor arguments may be tweaked with some prefixes to variable references. An expression $@x$ is a version of $x$ where all implicit constructor arguments have been made explicit. An expression $@@x$ achieves the same effect, additionally halting automatic resolution of type class instances. The same syntax works for variables projected out of modules and for capitalized variables (datatype constructors).
+
+At the expression level, an analogue is available of the composite $\lambda$ form for constructors. We define the language of binders as $b ::= x \mid (x : \tau) \mid (x \; ? \; \kappa) \mid [c \sim c]$. A lone variable $x$ as a binder stands for an expression variable of unspecified type.
+
+A $\mt{val}$ or $\mt{val} \; \mt{rec}$ declaration may include expression binders before the equal sign, following the binder grammar from the last paragraph. Such declarations are elaborated into versions that add additional $\lambda$s to the fronts of the righthand sides, as appropriate. The keyword $\mt{fun}$ is a synonym for $\mt{val} \; \mt{rec}$.
+
+A signature item $\mt{functor} \; X_1 \; (X_2 : S_1) : S_2$ is elaborated into $\mt{structure} \; X_1 : \mt{functor}(X_2 : S_1) : S_2$. A declaration $\mt{functor} \; X_1 \; (X_2 : S_1) : S_2 = M$ is elaborated into $\mt{structure} \; X_1 : \mt{functor}(X_2 : S_1) : S_2 = \mt{functor}(X_2 : S_1) : S_2 = M$.
+
+A declaration $\mt{table} \; x : \{(c = c,)^*\}$ is elaborated into $\mt{table} \; x : [(c = c,)^*]$.
+
+The syntax $\mt{where} \; \mt{type}$ is an alternate form of $\mt{where} \; \mt{con}$.
+
+The syntax $\mt{if} \; e \; \mt{then} \; e_1 \; \mt{else} \; e_2$ expands to $\mt{case} \; e \; \mt{of} \; \mt{Basis}.\mt{True} \Rightarrow e_1 \mid \mt{Basis}.\mt{False} \Rightarrow e_2$.
+
+There are infix operator syntaxes for a number of functions defined in the $\mt{Basis}$ module. There is $=$ for $\mt{eq}$, $\neq$ for $\mt{neq}$, $-$ for $\mt{neg}$ (as a prefix operator) and $\mt{minus}$, $+$ for $\mt{plus}$, $\times$ for $\mt{times}$, $/$ for $\mt{div}$, $\%$ for $\mt{mod}$, $<$ for $\mt{lt}$, $\leq$ for $\mt{le}$, $>$ for $\mt{gt}$, and $\geq$ for $\mt{ge}$.
+
+A signature item $\mt{table} \; x : c$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{sql\_table} \; c$. $\mt{sequence} \; x$ is short for $\mt{val} \; x : \mt{Basis}.\mt{sql\_sequence}$, and $\mt{cookie} \; x : \tau$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{http\_cookie} \; \tau$.
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 413a2ddcfcbf235bf0cdd220f7ecefe93db37bf0 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 09:34:11 -0500
Subject: Kinding
---
doc/manual.tex | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 90 insertions(+), 5 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 9a2f4173..0bd129cd 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -68,14 +68,14 @@ $$\begin{array}{rrcll}
\emph{Constructors} are the main class of compile-time-only data. They include proper types and are classified by kinds.
$$\begin{array}{rrcll}
\textrm{Constructors} & c, \tau &::=& (c) :: \kappa & \textrm{kind annotation} \\
- &&& x & \textrm{constructor variable} \\
+ &&& \hat{x} & \textrm{constructor variable} \\
\\
&&& \tau \to \tau & \textrm{function type} \\
&&& x \; ? \; \kappa \to \tau & \textrm{polymorphic function type} \\
&&& \$ c & \textrm{record type} \\
\\
&&& c \; c & \textrm{type-level function application} \\
- &&& \lambda x \; ? \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
+ &&& \lambda x \; :: \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
\\
&&& () & \textrm{type-level unit} \\
&&& \#X & \textrm{field name} \\
@@ -91,6 +91,9 @@ $$\begin{array}{rrcll}
\\
&&& \_ :: \kappa & \textrm{wildcard} \\
&&& (c) & \textrm{explicit precedence} \\
+ \\
+ \textrm{Qualified uncapitalized variables} & \hat{x} &::=& x & \textrm{not from a module} \\
+ &&& M.x & \textrm{projection from a module} \\
\end{array}$$
Modules of the module system are described by \emph{signatures}.
@@ -162,9 +165,6 @@ $$\begin{array}{rrcll}
\\
\textrm{Local declarations} & ed &::=& \cd{val} \; x : \tau = e & \textrm{non-recursive value} \\
&&& \cd{val} \; \cd{rec} \; (x : \tau = e \; \cd{and})^+ & \textrm{mutually-recursive values} \\
- \\
- \textrm{Qualified uncapitalized variables} & \hat{x} &::=& x & \textrm{not from a module} \\
- &&& M.x & \textrm{projection from a module} \\
\end{array}$$
\emph{Declarations} primarily bring new symbols into context.
@@ -229,4 +229,89 @@ There are infix operator syntaxes for a number of functions defined in the $\mt{
A signature item $\mt{table} \; x : c$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{sql\_table} \; c$. $\mt{sequence} \; x$ is short for $\mt{val} \; x : \mt{Basis}.\mt{sql\_sequence}$, and $\mt{cookie} \; x : \tau$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{http\_cookie} \; \tau$.
+
+\section{Static Semantics}
+
+In this section, we give a declarative presentation of Ur's typing rules and related judgments. Inference is the subject of the next section; here, we assume that an oracle has filled in all wildcards with concrete values.
+
+Since there is significant mutual recursion among the judgments, we introduce them all before beginning to give rules. We use the same variety of contexts throughout this section, implicitly introducing new sorts of context entries as needed.
+\begin{itemize}
+\item $\Gamma \vdash c :: \kappa$ assigns a kind to a constructor in a context.
+\item $\Gamma \vdash c \sim c$ proves the disjointness of two record constructors; that is, that they share no field names. We overload the judgment to apply to pairs of field names as well.
+\item $\Gamma \vdash c \hookrightarrow \overline{c}$ proves that record constructor $c$ decomposes into set $\overline{c}$ of field names and record constructors.
+\item $\Gamma \vdash c \equiv c$ proves the computational equivalence of two constructors. This is often called a \emph{definitional equality} in the world of type theory.
+\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
+\item $\Gamma \vdash M : S$ is the module signature checking judgment.
+\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
+\end{itemize}
+
+\subsection{Kinding}
+
+$$\infer{\Gamma \vdash (c) :: \kappa :: \kappa}{
+ \Gamma \vdash c :: \kappa
+}
+\quad \infer{\Gamma \vdash x :: \kappa}{
+ x :: \kappa \in \Gamma
+}
+\quad \infer{\Gamma \vdash x :: \kappa}{
+ x :: \kappa = c \in \Gamma
+}$$
+
+$$\infer{\Gamma \vdash M.x :: \kappa}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{con} \; x) = \kappa
+}
+\quad \infer{\Gamma \vdash M.x :: \kappa}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{con} \; x) = (\kappa, c)
+}$$
+
+$$\infer{\Gamma \vdash \tau_1 \to \tau_2 :: \mt{Type}}{
+ \Gamma \vdash \tau_1 :: \mt{Type}
+ & \Gamma \vdash \tau_2 :: \mt{Type}
+}
+\quad \infer{\Gamma \vdash x \; ? \: \kappa \to \tau :: \mt{Type}}{
+ \Gamma, x :: \kappa \vdash \tau :: \mt{Type}
+}
+\quad \infer{\Gamma \vdash \$c :: \mt{Type}}{
+ \Gamma \vdash c :: \{\mt{Type}\}
+}$$
+
+$$\infer{\Gamma \vdash c_1 \; c_2 :: \kappa_2}{
+ \Gamma \vdash c_1 :: \kappa_1 \to \kappa_2
+ & \Gamma \vdash c_2 :: \kappa_1
+}
+\quad \infer{\Gamma \vdash \lambda x \; :: \; \kappa_1 \Rightarrow c :: \kappa_1 \to \kappa_2}{
+ \Gamma, x :: \kappa_1 \vdash c :: \kappa_2
+}$$
+
+$$\infer{\Gamma \vdash () :: \mt{Unit}}{}
+\quad \infer{\Gamma \vdash \#X :: \mt{Name}}{}$$
+
+$$\infer{\Gamma \vdash [\overline{c_i = c'_i}] :: \{\kappa\}}{
+ \forall i: \Gamma \vdash c_i :: \mt{Name}
+ & \Gamma \vdash c'_i :: \kappa
+ & \forall i \neq j: \Gamma \vdash c_i \sim c_j
+}
+\quad \infer{\Gamma \vdash c_1 \rc c_2 :: \{\kappa\}}{
+ \Gamma \vdash c_1 :: \{\kappa\}
+ & \Gamma \vdash c_2 :: \{\kappa\}
+ & \Gamma \vdash c_1 \sim c_2
+}$$
+
+$$\infer{\Gamma \vdash \mt{fold} :: (\mt{Name} \to \kappa_1 \to \kappa_2 \to \kappa_2) \to \kappa_2 \to \{\kappa_1\} \to \kappa_2}{}$$
+
+$$\infer{\Gamma \vdash (\overline c) :: (\kappa_1 \times \ldots \times \kappa_n)}{
+ \forall i: \Gamma \vdash c_i :: \kappa_i
+}
+\quad \infer{\Gamma \vdash c.i :: \kappa_i}{
+ \Gamma \vdash c :: (\kappa_1 \times \ldots \times \kappa_n)
+}$$
+
+$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c :: \kappa}{
+ \Gamma \vdash c_1 :: \{\kappa'\}
+ & \Gamma \vdash c_2 :: \{\kappa'\}
+ & \Gamma, c_1 \sim c_2 \vdash c :: \kappa
+}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 96c1d0efd00362926493295a132c19a209ac7838 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 09:48:10 -0500
Subject: Disjointness
---
doc/manual.tex | 38 +++++++++++++++++++++++++++++++++++++-
1 file changed, 37 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 0bd129cd..2b0f2c57 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -238,7 +238,7 @@ Since there is significant mutual recursion among the judgments, we introduce th
\begin{itemize}
\item $\Gamma \vdash c :: \kappa$ assigns a kind to a constructor in a context.
\item $\Gamma \vdash c \sim c$ proves the disjointness of two record constructors; that is, that they share no field names. We overload the judgment to apply to pairs of field names as well.
-\item $\Gamma \vdash c \hookrightarrow \overline{c}$ proves that record constructor $c$ decomposes into set $\overline{c}$ of field names and record constructors.
+\item $\Gamma \vdash c \hookrightarrow C$ proves that record constructor $c$ decomposes into set $C$ of field names and record constructors.
\item $\Gamma \vdash c \equiv c$ proves the computational equivalence of two constructors. This is often called a \emph{definitional equality} in the world of type theory.
\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
@@ -314,4 +314,40 @@ $$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c :: \kappa}{
& \Gamma, c_1 \sim c_2 \vdash c :: \kappa
}$$
+\subsection{Record Disjointness}
+
+We will use a keyword $\mt{map}$ as a shorthand, such that, for $f$ of kind $\kappa \to \kappa'$, $\mt{map} \; f$ stands for $\mt{fold} \; (\lambda (x_1 :: \mt{Name}) (x_2 :: \kappa) (x_3 :: \{\kappa'\}) \Rightarrow [x_1 = f \; x_2] \rc x_3) \; []$.
+
+$$\infer{\Gamma \vdash c_1 \sim c_2}{
+ \Gamma \vdash c_1 \hookrightarrow c'_1
+ & \Gamma \vdash c_2 \hookrightarrow c'_2
+ & \forall c''_1 \in c'_1, c''_2 \in c'_2: \Gamma \vdash c''_1 \sim c''_2
+}
+\quad \infer{\Gamma \vdash X \sim X'}{
+ X \neq X'
+}$$
+
+$$\infer{\Gamma \vdash c_1 \sim c_2}{
+ c'_1 \sim c'_2 \in \Gamma
+ & \Gamma \vdash c'_1 \hookrightarrow c''_1
+ & \Gamma \vdash c'_2 \hookrightarrow c''_2
+ & c_1 \in c''_1
+ & c_2 \in c''_2
+}$$
+
+$$\infer{\Gamma \vdash c \hookrightarrow \{c\}}{}
+\quad \infer{\Gamma \vdash [\overline{c = c'}] \hookrightarrow \{\overline{c}\}}{}
+\quad \infer{\Gamma \vdash c_1 \rc c_2 \hookrightarrow C_1 \cup C_2}{
+ \Gamma \vdash c_1 \hookrightarrow C_1
+ & \Gamma \vdash c_2 \hookrightarrow C_2
+}
+\quad \infer{\Gamma \vdash c \hookrightarrow C}{
+ \Gamma \vdash c \equiv c'
+ & \Gamma \vdash c' \hookrightarrow C
+}
+\quad \infer{\Gamma \vdash \mt{map} \; f \; c \hookrightarrow C}{
+ \Gamma \vdash c \hookrightarrow C
+}$$
+
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From e4fff6ca5e4e4d1e6a4dba3456a002e4f6bc3e2d Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 10:05:46 -0500
Subject: Definitional equality
---
doc/manual.tex | 43 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 2b0f2c57..cff270df 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -349,5 +349,48 @@ $$\infer{\Gamma \vdash c \hookrightarrow \{c\}}{}
\Gamma \vdash c \hookrightarrow C
}$$
+\subsection{Definitional Equality}
+
+We use $\mathcal C$ to stand for a one-hole context that, when filled, yields a constructor. The notation $\mathcal C[c]$ plugs $c$ into $\mathcal C$. We omit the standard definition of one-hole contexts. We write $[x \mapsto c_1]c_2$ for capture-avoiding substitution of $c_1$ for $x$ in $c_2$.
+
+$$\infer{\Gamma \vdash c \equiv c}{}
+\quad \infer{\Gamma \vdash c_1 \equiv c_2}{
+ \Gamma \vdash c_2 \equiv c_1
+}
+\quad \infer{\Gamma \vdash c_1 \equiv c_3}{
+ \Gamma \vdash c_1 \equiv c_2
+ & \Gamma \vdash c_2 \equiv c_3
+}
+\quad \infer{\Gamma \vdash \mathcal C[c_1] \equiv \mathcal C[c_2]}{
+ \Gamma \vdash c_1 \equiv c_2
+}$$
+
+$$\infer{\Gamma \vdash x \equiv c}{
+ x :: \kappa = c \in \Gamma
+}
+\quad \infer{\Gamma \vdash M.x \equiv c}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{con} \; x) = (\kappa, c)
+}
+\quad \infer{\Gamma \vdash (\overline c).i \equiv c_i}{}$$
+
+$$\infer{\Gamma \vdash (\lambda x :: \kappa \Rightarrow c) \; c' \equiv [x \mapsto c'] c}{}
+\quad \infer{\Gamma \vdash c_1 \rc c_2 \equiv c_2 \rc c_1}{}
+\quad \infer{\Gamma \vdash c_1 \rc (c_2 \rc c_3) \equiv (c_1 \rc c_2) \rc c_3}{}$$
+
+$$\infer{\Gamma \vdash [] \rc c \equiv c}{}
+\quad \infer{\Gamma \vdash [\overline{c_1 = c'_1}] \rc [\overline{c_2 = c'_2}] \equiv [\overline{c_1 = c'_1}, \overline{c_2 = c'_2}]}{}$$
+
+$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c \equiv c}{
+ \Gamma \vdash c_1 \sim c_2
+}
+\quad \infer{\Gamma \vdash \mt{fold} \; f \; i \; [] \equiv i}{}
+\quad \infer{\Gamma \vdash \mt{fold} \; f \; i \; ([c_1 = c_2] \rc c) \equiv f \; c_1 \; c_2 \; (\mt{fold} \; f \; i \; c)}{}$$
+
+$$\infer{\Gamma \vdash \mt{map} \; (\lambda x \Rightarrow x) \; c \equiv c}{}
+\quad \infer{\Gamma \vdash \mt{fold} \; f \; i \; (\mt{map} \; f' \; c)
+ \equiv \mt{fold} \; (\lambda (x_1 :: \mt{Name}) (x_2 :: \kappa) \Rightarrow f \; x_1 \; (f' \; x_2)) \; i \; c}{}$$
+
+$$\infer{\Gamma \vdash \mt{map} \; f \; (c_1 \rc c_2) \equiv \mt{map} \; f \; c_1 \rc \mt{map} \; f \; c_2}{}$$
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 6748925a8c158e84a40b2e8f0142eaea7691d2f6 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 10:34:56 -0500
Subject: Typing
---
doc/manual.tex | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 92 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index cff270df..dec14cd2 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -201,6 +201,8 @@ In many contexts where record fields are expected, like in a projection $e.c$, a
A record type may be written $\{(c = c,)^*\}$, which elaborates to $\$[(c = c,)^*]$.
+The notation $[c_1, \ldots, c_n]$ is shorthand for $[c_1 = (), \ldots, c_n = ()]$.
+
A tuple type $(\tau_1, \ldots, \tau_n)$ expands to a record type $\{1 = \tau_1, \ldots, n = \tau_n\}$, with natural numbers as field names. A tuple pattern $(p_1, \ldots, p_n)$ expands to a rigid record pattern $\{1 = p_1, \ldots, n = p_n\}$. Positive natural numbers may be used in most places where field names would be allowed.
In general, several adjacent $\lambda$ forms may be combined into one, and kind and type annotations may be omitted, in which case they are implicitly included as wildcards. More formally, for constructor-level abstractions, we can define a new non-terminal $b ::= x \mid (x :: \kappa) \mid [c \sim c]$ and allow composite abstractions of the form $\lambda b^+ \Rightarrow c$, elaborating into the obvious sequence of one core $\lambda$ per element of $b^+$.
@@ -241,6 +243,8 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\Gamma \vdash c \hookrightarrow C$ proves that record constructor $c$ decomposes into set $C$ of field names and record constructors.
\item $\Gamma \vdash c \equiv c$ proves the computational equivalence of two constructors. This is often called a \emph{definitional equality} in the world of type theory.
\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
+\item $\Gamma \vdash p \leadsto \Gamma, \tau$ combines typing of patterns with calculation of which new variables they bind.
+\item $\Gamma \vdash d \leadsto \Gamma$ expresses how a declaration modifies a context. We overload this judgment to apply to sequences of declarations.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
\end{itemize}
@@ -393,4 +397,92 @@ $$\infer{\Gamma \vdash \mt{map} \; (\lambda x \Rightarrow x) \; c \equiv c}{}
$$\infer{\Gamma \vdash \mt{map} \; f \; (c_1 \rc c_2) \equiv \mt{map} \; f \; c_1 \rc \mt{map} \; f \; c_2}{}$$
+\subsection{Typing}
+
+We assume the existence of a function $T$ assigning types to literal constants. It maps integer constants to $\mt{Basis}.\mt{int}$, float constants to $\mt{Basis}.\mt{float}$, and string constants to $\mt{Basis}.\mt{string}$.
+
+We also refer to a function $\mathcal I$, such that $\mathcal I(\tau)$ ``uses an oracle'' to instantiate all constructor function arguments at the beginning of $\tau$ that are marked implicit; i.e., replace $x_1 ::: \kappa_1 \to \ldots \to x_n ::: \kappa_n \to \tau$ with $[x_1 \mapsto c_1]\ldots[x_n \mapsto c_n]\tau$, where the $c_i$s are inferred and $\tau$ does not start like $x ::: \kappa \to \tau'$.
+
+$$\infer{\Gamma \vdash e : \tau : \tau}{
+ \Gamma \vdash e : \tau
+}
+\quad \infer{\Gamma \vdash e : \tau}{
+ \Gamma \vdash e : \tau'
+ & \Gamma \vdash \tau' \equiv \tau
+}
+\quad \infer{\Gamma \vdash \ell : T(\ell)}{}$$
+
+$$\infer{\Gamma \vdash x : \mathcal I(\tau)}{
+ x : \tau \in \Gamma
+}
+\quad \infer{\Gamma \vdash M.x : \mathcal I(\tau)}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{val} \; x) = \tau
+}
+\quad \infer{\Gamma \vdash X : \mathcal I(\tau)}{
+ X : \tau \in \Gamma
+}
+\quad \infer{\Gamma \vdash M.X : \mathcal I(\tau)}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{val} \; X) = \tau
+}$$
+
+$$\infer{\Gamma \vdash e_1 \; e_2 : \tau_2}{
+ \Gamma \vdash e_1 : \tau_1 \to \tau_2
+ & \Gamma \vdash e_2 : \tau_1
+}
+\quad \infer{\Gamma \vdash \lambda x : \tau_1 \Rightarrow e : \tau_1 \to \tau_2}{
+ \Gamma, x : \tau_1 \vdash e : \tau_2
+}$$
+
+$$\infer{\Gamma \vdash e [c] : [x \mapsto c]\tau}{
+ \Gamma \vdash e : x :: \kappa \to \tau
+ & \Gamma \vdash c :: \kappa
+}
+\quad \infer{\Gamma \vdash \lambda x \; ? \; \kappa \Rightarrow e : x \; ? \; \kappa \to \tau}{
+ \Gamma, x :: \kappa \vdash e : \tau
+}$$
+
+$$\infer{\Gamma \vdash \{\overline{c = e}\} : \{\overline{c : \tau}\}}{
+ \forall i: \Gamma \vdash c_i :: \mt{Name}
+ & \Gamma \vdash e_i : \tau_i
+ & \forall i \neq j: \Gamma \vdash c_i \sim c_j
+}
+\quad \infer{\Gamma \vdash e.c : \tau}{
+ \Gamma \vdash e : \$([c = \tau] \rc c')
+}
+\quad \infer{\Gamma \vdash e_1 \rc e_2 : \$(c_1 \rc c_2)}{
+ \Gamma \vdash e_1 : \$c_1
+ & \Gamma \vdash e_2 : \$c_2
+ & \Gamma \vdash c_1 \sim c_2
+}$$
+
+$$\infer{\Gamma \vdash e \rcut c : \$c'}{
+ \Gamma \vdash e : \$([c = \tau] \rc c')
+}
+\quad \infer{\Gamma \vdash e \rcutM c : \$c'}{
+ \Gamma \vdash e : \$(c \rc c')
+}$$
+
+$$\infer{\Gamma \vdash \mt{fold} : \begin{array}{c}
+ x_1 :: (\{\kappa\} \to \mt{Type})
+ \to (x_2 :: \mt{Name} \to x_3 :: \kappa \to x_4 :: \{\kappa\} \to [[x_2] \sim x_4]
+ \Rightarrow x_1 \; x_4 \to x_1 \; ([x_2 = x_3] \rc x_4)) \\
+ \to x_1 \; [] \to x_5 :: \{\kappa\} \to x_1 \; x_5
+ \end{array}}{}$$
+
+$$\infer{\Gamma \vdash \mt{let} \; \overline{ed} \; \mt{in} \; e \; \mt{end} : \tau}{
+ \Gamma \vdash \overline{ed} \leadsto \Gamma'
+ & \Gamma' \vdash e : \tau
+}
+\quad \infer{\Gamma \vdash \mt{case} \; e \; \mt{of} \; \overline{p \Rightarrow e} : \tau}{
+ \Gamma \vdash e : \tau'
+ & \forall i: \Gamma \vdash p_i \leadsto \Gamma_i, \tau'
+ & \Gamma_i \vdash e_i : \tau
+}$$
+
+$$\infer{\Gamma \vdash [c_1 \sim c_2] \Rightarrow e : [c_1 \sim c_2] \Rightarrow \tau}{
+ \Gamma \vdash c_1 :: \{\kappa\}
+ & \Gamma \vdash c_2 :: \{\kappa\}
+ & \Gamma, c_1 \sim c_2 \vdash e : \tau
+}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 73de524554aaa11c454e95cec39e8ada98c44cf4 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 10:49:47 -0500
Subject: Pattern typing
---
doc/manual.tex | 40 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 38 insertions(+), 2 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index dec14cd2..db679405 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -243,7 +243,7 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\Gamma \vdash c \hookrightarrow C$ proves that record constructor $c$ decomposes into set $C$ of field names and record constructors.
\item $\Gamma \vdash c \equiv c$ proves the computational equivalence of two constructors. This is often called a \emph{definitional equality} in the world of type theory.
\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
-\item $\Gamma \vdash p \leadsto \Gamma, \tau$ combines typing of patterns with calculation of which new variables they bind.
+\item $\Gamma \vdash p \leadsto \Gamma; \tau$ combines typing of patterns with calculation of which new variables they bind.
\item $\Gamma \vdash d \leadsto \Gamma$ expresses how a declaration modifies a context. We overload this judgment to apply to sequences of declarations.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
@@ -397,7 +397,7 @@ $$\infer{\Gamma \vdash \mt{map} \; (\lambda x \Rightarrow x) \; c \equiv c}{}
$$\infer{\Gamma \vdash \mt{map} \; f \; (c_1 \rc c_2) \equiv \mt{map} \; f \; c_1 \rc \mt{map} \; f \; c_2}{}$$
-\subsection{Typing}
+\subsection{Expression Typing}
We assume the existence of a function $T$ assigning types to literal constants. It maps integer constants to $\mt{Basis}.\mt{int}$, float constants to $\mt{Basis}.\mt{float}$, and string constants to $\mt{Basis}.\mt{string}$.
@@ -485,4 +485,40 @@ $$\infer{\Gamma \vdash [c_1 \sim c_2] \Rightarrow e : [c_1 \sim c_2] \Rightarrow
& \Gamma, c_1 \sim c_2 \vdash e : \tau
}$$
+\subsection{Pattern Typing}
+
+$$\infer{\Gamma \vdash \_ \leadsto \Gamma; \tau}{}
+\quad \infer{\Gamma \vdash x \leadsto \Gamma, x : \tau; \tau}{}
+\quad \infer{\Gamma \vdash \ell \leadsto \Gamma; T(\ell)}{}$$
+
+$$\infer{\Gamma \vdash X \leadsto \Gamma; \overline{[x_i \mapsto \tau'_i]}\tau}{
+ X : \overline{x ::: \mt{Type}} \to \tau \in \Gamma
+ & \textrm{$\tau$ not a function type}
+}
+\quad \infer{\Gamma \vdash X \; p \leadsto \Gamma'; \overline{[x_i \mapsto \tau'_i]}\tau}{
+ X : \overline{x ::: \mt{Type}} \to \tau'' \to \tau \in \Gamma
+ & \Gamma \vdash p \leadsto \Gamma'; \overline{[x_i \mapsto \tau'_i]}\tau''
+}$$
+
+$$\infer{\Gamma \vdash M.X \leadsto \Gamma; \overline{[x_i \mapsto \tau'_i]}\tau}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{val} \; X) = \overline{x ::: \mt{Type}} \to \tau
+ & \textrm{$\tau$ not a function type}
+}$$
+
+$$\infer{\Gamma \vdash M.X \; p \leadsto \Gamma'; \overline{[x_i \mapsto \tau'_i]}\tau}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{val} \; X) = \overline{x ::: \mt{Type}} \to \tau'' \to \tau
+ & \Gamma \vdash p \leadsto \Gamma'; \overline{[x_i \mapsto \tau'_i]}\tau''
+}$$
+
+$$\infer{\Gamma \vdash \{\overline{x = p}\} \leadsto \Gamma_n; \{\overline{x = \tau}\}}{
+ \Gamma_0 = \Gamma
+ & \forall i: \Gamma_i \vdash p_i \leadsto \Gamma_{i+1}; \tau_i
+}
+\quad \infer{\Gamma \vdash \{\overline{x = p}, \ldots\} \leadsto \Gamma_n; \$([\overline{x = \tau}] \rc c)}{
+ \Gamma_0 = \Gamma
+ & \forall i: \Gamma_i \vdash p_i \leadsto \Gamma_{i+1}; \tau_i
+}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From e2c7097ddf12808ae9f108e911e93ab99e640d80 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 11:33:51 -0500
Subject: Declaration typing
---
doc/manual.tex | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 81 insertions(+), 2 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index db679405..4df95230 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -181,8 +181,8 @@ $$\begin{array}{rrcll}
&&& \mt{open} \; \mt{constraints} \; M & \textrm{inclusion of just the constraints from a module} \\
&&& \mt{table} \; x : c & \textrm{SQL table} \\
&&& \mt{sequence} \; x & \textrm{SQL sequence} \\
- &&& \mt{class} \; x = c & \textrm{concrete type class} \\
&&& \mt{cookie} \; x : \tau & \textrm{HTTP cookie} \\
+ &&& \mt{class} \; x = c & \textrm{concrete type class} \\
\\
\textrm{Modules} & M &::=& \mt{struct} \; d^* \; \mt{end} & \textrm{constant} \\
&&& X & \textrm{variable} \\
@@ -245,8 +245,10 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
\item $\Gamma \vdash p \leadsto \Gamma; \tau$ combines typing of patterns with calculation of which new variables they bind.
\item $\Gamma \vdash d \leadsto \Gamma$ expresses how a declaration modifies a context. We overload this judgment to apply to sequences of declarations.
+\item $\Gamma \vdash S$ is the signature validity judgment.
+\item $\Gamma \vdash S \leq S$ is the signature compatibility judgment.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
-\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
+\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{datatype} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
\end{itemize}
\subsection{Kinding}
@@ -521,4 +523,81 @@ $$\infer{\Gamma \vdash \{\overline{x = p}\} \leadsto \Gamma_n; \{\overline{x = \
& \forall i: \Gamma_i \vdash p_i \leadsto \Gamma_{i+1}; \tau_i
}$$
+\subsection{Declaration Typing}
+
+We use an auxiliary judgment $\overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'$, expressing the enrichment of $\Gamma$ with the types of the datatype constructors $\overline{dc}$, when they are known to belong to datatype $x$ with type parameters $\overline{y}$.
+
+This is the first judgment where we deal with type classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. In the compiler, a set of available type classes and their instances is maintained, and these instances are used to fill in expression wildcards.
+
+We presuppose the existence of a function $\mathcal O$, where $\mathcal O(M, S)$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature $S$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $S$.
+
+$$\infer{\Gamma \vdash \cdot \leadsto \Gamma}{}
+\quad \infer{\Gamma \vdash d, \overline{d} \leadsto \Gamma''}{
+ \Gamma \vdash d \leadsto \Gamma'
+ & \Gamma' \vdash \overline{d} \leadsto \Gamma''
+}$$
+
+$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa = c}{
+ \Gamma \vdash c :: \kappa
+}
+\quad \infer{\Gamma \vdash \mt{datatype} \; x \; \overline{y} = \overline{dc} \leadsto \Gamma'}{
+ \overline{y}; x; \Gamma, x :: \mt{Type}^{\mt{len}(\overline y)} \to \mt{Type} \vdash \overline{dc} \leadsto \Gamma'
+}$$
+
+$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leadsto \Gamma'}{
+ \Gamma \vdash M : S
+ & \mt{proj}(M, S, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
+ & \overline{y}; x; \Gamma, x :: \mt{Type}^{\mt{len}(\overline y)} \to \mt{Type} = M.z \vdash \overline{dc} \leadsto \Gamma'
+}$$
+
+$$\infer{\Gamma \vdash \mt{val} \; x : \tau = e \leadsto \Gamma, x : \tau}{
+ \Gamma \vdash e : \tau
+}$$
+
+$$\infer{\Gamma \vdash \mt{val} \; \mt{rec} \; \overline{x : \tau = e} \leadsto \Gamma, \overline{x : \tau}}{
+ \forall i: \Gamma, \overline{x : \tau} \vdash e_i : \tau_i
+ & \textrm{$e_i$ starts with an expression $\lambda$, optionally preceded by constructor and disjointness $\lambda$s}
+}$$
+
+$$\infer{\Gamma \vdash \mt{structure} \; X : S = M \leadsto \Gamma, X : S}{
+ \Gamma \vdash M : S
+}
+\quad \infer{\Gamma \vdash \mt{signature} \; X = S \leadsto \Gamma, X = S}{
+ \Gamma \vdash S
+}$$
+
+$$\infer{\Gamma \vdash \mt{open} \; M \leadsto \Gamma, \mathcal O(M, S)}{
+ \Gamma \vdash M : S
+}$$
+
+$$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma}{
+ \Gamma \vdash c_1 :: \{\kappa\}
+ & \Gamma \vdash c_2 :: \{\kappa\}
+ & \Gamma \vdash c_1 \sim c_2
+}
+\quad \infer{\Gamma \vdash \mt{open} \; \mt{constraints} \; M \leadsto \Gamma, \mathcal O_c(M, S)}{
+ \Gamma \vdash M : S
+}$$
+
+$$\infer{\Gamma \vdash \mt{table} \; x : c \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_table} \; c}{
+ \Gamma \vdash c :: \{\mt{Type}\}
+}
+\quad \infer{\Gamma \vdash \mt{sequence} \; x \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_sequence}}{}$$
+
+$$\infer{\Gamma \vdash \mt{cookie} \; x : \tau \leadsto \Gamma, x : \mt{Basis}.\mt{http\_cookie} \; \tau}{
+ \Gamma \vdash \tau :: \mt{Type}
+}$$
+
+$$\infer{\Gamma \vdash \mt{class} \; x = c \leadsto \Gamma, x :: \mt{Type} \to \mt{Type} = c}{
+ \Gamma \vdash c :: \mt{Type} \to \mt{Type}
+}$$
+
+$$\infer{\overline{y}; x; \Gamma \vdash \cdot \leadsto \Gamma}{}
+\quad \infer{\overline{y}; x; \Gamma \vdash X \mid \overline{dc} \leadsto \Gamma', X : \overline{y ::: \mt{Type}} \to x \; \overline{y}}{
+ \overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'
+}
+\quad \infer{\overline{y}; x; \Gamma \vdash X \; \mt{of} \; \tau \mid \overline{dc} \leadsto \Gamma', X : \overline{y ::: \mt{Type}} \to \tau \to x \; \overline{y}}{
+ \overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'
+}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 022c9806c7c5d74195c0bc654c4f064384cb1d42 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 12:58:58 -0500
Subject: Signature compatibility
---
doc/manual.tex | 36 ++++++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 4df95230..2c8379d5 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -245,8 +245,7 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
\item $\Gamma \vdash p \leadsto \Gamma; \tau$ combines typing of patterns with calculation of which new variables they bind.
\item $\Gamma \vdash d \leadsto \Gamma$ expresses how a declaration modifies a context. We overload this judgment to apply to sequences of declarations.
-\item $\Gamma \vdash S$ is the signature validity judgment.
-\item $\Gamma \vdash S \leq S$ is the signature compatibility judgment.
+\item $\Gamma \vdash S \leq S$ is the signature compatibility judgment. We write $\Gamma \vdash S$ as shorthand for $\Gamma \vdash S \leq S$.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{datatype} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
\end{itemize}
@@ -600,4 +599,37 @@ $$\infer{\overline{y}; x; \Gamma \vdash \cdot \leadsto \Gamma}{}
\overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'
}$$
+\subsection{Signature Compatibility}
+
+$$\infer{\Gamma \vdash S \equiv S}{}
+\quad \infer{\Gamma \vdash S_1 \equiv S_2}{
+ \Gamma \vdash S_2 \equiv S_1
+}
+\quad \infer{\Gamma \vdash X \equiv S}{
+ X = S \in \Gamma
+}
+\quad \infer{\Gamma \vdash M.X \equiv S}{
+ \Gamma \vdash M : S'
+ & \mt{proj}(M, S', \mt{signature} \; X) = S
+}$$
+
+$$\infer{\Gamma \vdash S \; \mt{where} \; \mt{con} \; x = c \equiv \mt{sig} \; \overline{s^1} \; \mt{con} \; x :: \kappa = c \; \overline{s^2} \; \mt{end}}{
+ \Gamma \vdash S \equiv \mt{sig} \; \overline{s^1} \; \mt{con} \; x :: \kappa \; \overline{s^2} \; \mt{end}
+ & \Gamma \vdash c :: \kappa
+}$$
+
+$$\infer{\Gamma \vdash S_1 \leq S_2}{
+ \Gamma \vdash S_1 \equiv S_2
+}
+\quad \infer{\Gamma \vdash \mt{sig} \; \overline{s} \; \mt{end} \leq \mt{sig} \; \mt{end}}{}
+\quad \infer{\Gamma \vdash \mt{sig} \; \overline{s^1} \; s \; \overline{s^2} \; \mt{end} \leq \mt{sig} \; s' \; \overline{s} \; \mt{end}}{
+ \Gamma \vdash s \leq s'; \Gamma'
+ & \Gamma' \vdash \mt{sig} \; \overline{s^1} \; s \; \overline{s^2} \; \mt{end} \leq \mt{sig} \; \overline{s} \; \mt{end}
+}$$
+
+$$\infer{\Gamma \vdash \mt{functor} (X : S_1) : S_2 \leq \mt{functor} (X : S'_1) : S'_2}{
+ \Gamma \vdash S'_1 \leq S_1
+ & \Gamma, X : S'_1 \vdash S_2 \leq S'_2
+}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 509cd9c3d6cb02ff1d23a831979208e327668432 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 13:50:53 -0500
Subject: Signature compatibility
---
doc/manual.tex | 191 ++++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 162 insertions(+), 29 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 2c8379d5..ed41acaa 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -244,10 +244,11 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\Gamma \vdash c \equiv c$ proves the computational equivalence of two constructors. This is often called a \emph{definitional equality} in the world of type theory.
\item $\Gamma \vdash e : \tau$ is a standard typing judgment.
\item $\Gamma \vdash p \leadsto \Gamma; \tau$ combines typing of patterns with calculation of which new variables they bind.
-\item $\Gamma \vdash d \leadsto \Gamma$ expresses how a declaration modifies a context. We overload this judgment to apply to sequences of declarations.
+\item $\Gamma \vdash d \leadsto \Gamma$ expresses how a declaration modifies a context. We overload this judgment to apply to sequences of declarations, as well as to signature items and sequences of signature items.
+\item $\Gamma \vdash S \equiv S$ is the signature equivalence judgment.
\item $\Gamma \vdash S \leq S$ is the signature compatibility judgment. We write $\Gamma \vdash S$ as shorthand for $\Gamma \vdash S \leq S$.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
-\item $\mt{proj}(M, S, V)$ is a partial function for projecting a signature item from a signature $S$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{datatype} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items of $S$.
+\item $\mt{proj}(M, \overline{s}, V)$ is a partial function for projecting a signature item from $\overline{s}$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{datatype} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items from $\overline{s}$.
\end{itemize}
\subsection{Kinding}
@@ -263,12 +264,12 @@ $$\infer{\Gamma \vdash (c) :: \kappa :: \kappa}{
}$$
$$\infer{\Gamma \vdash M.x :: \kappa}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{con} \; x) = \kappa
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{con} \; x) = \kappa
}
\quad \infer{\Gamma \vdash M.x :: \kappa}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{con} \; x) = (\kappa, c)
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{con} \; x) = (\kappa, c)
}$$
$$\infer{\Gamma \vdash \tau_1 \to \tau_2 :: \mt{Type}}{
@@ -374,8 +375,8 @@ $$\infer{\Gamma \vdash x \equiv c}{
x :: \kappa = c \in \Gamma
}
\quad \infer{\Gamma \vdash M.x \equiv c}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{con} \; x) = (\kappa, c)
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{con} \; x) = (\kappa, c)
}
\quad \infer{\Gamma \vdash (\overline c).i \equiv c_i}{}$$
@@ -417,15 +418,15 @@ $$\infer{\Gamma \vdash x : \mathcal I(\tau)}{
x : \tau \in \Gamma
}
\quad \infer{\Gamma \vdash M.x : \mathcal I(\tau)}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{val} \; x) = \tau
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{val} \; x) = \tau
}
\quad \infer{\Gamma \vdash X : \mathcal I(\tau)}{
X : \tau \in \Gamma
}
\quad \infer{\Gamma \vdash M.X : \mathcal I(\tau)}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{val} \; X) = \tau
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{val} \; X) = \tau
}$$
$$\infer{\Gamma \vdash e_1 \; e_2 : \tau_2}{
@@ -502,14 +503,14 @@ $$\infer{\Gamma \vdash X \leadsto \Gamma; \overline{[x_i \mapsto \tau'_i]}\tau}{
}$$
$$\infer{\Gamma \vdash M.X \leadsto \Gamma; \overline{[x_i \mapsto \tau'_i]}\tau}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{val} \; X) = \overline{x ::: \mt{Type}} \to \tau
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{val} \; X) = \overline{x ::: \mt{Type}} \to \tau
& \textrm{$\tau$ not a function type}
}$$
$$\infer{\Gamma \vdash M.X \; p \leadsto \Gamma'; \overline{[x_i \mapsto \tau'_i]}\tau}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{val} \; X) = \overline{x ::: \mt{Type}} \to \tau'' \to \tau
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{val} \; X) = \overline{x ::: \mt{Type}} \to \tau'' \to \tau
& \Gamma \vdash p \leadsto \Gamma'; \overline{[x_i \mapsto \tau'_i]}\tau''
}$$
@@ -528,7 +529,9 @@ We use an auxiliary judgment $\overline{y}; x; \Gamma \vdash \overline{dc} \lead
This is the first judgment where we deal with type classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. In the compiler, a set of available type classes and their instances is maintained, and these instances are used to fill in expression wildcards.
-We presuppose the existence of a function $\mathcal O$, where $\mathcal(M, S)$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature $S$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $S$.
+We presuppose the existence of a function $\mathcal O$, where $\mathcal O(M, \overline{s})$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature items $\overline{s}$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $\overline{s}$.
+
+We write $\kappa_1^n \to \kappa$ as a shorthand, where $\kappa_1^0 \to \kappa = \kappa$ and $\kappa_1^{n+1} \to \kappa_2 = \kappa_1 \to (\kappa_1^n \to \kappa_2)$. We write $\mt{len}(\overline{y})$ for the length of vector $\overline{y}$ of variables.
$$\infer{\Gamma \vdash \cdot \leadsto \Gamma}{}
\quad \infer{\Gamma \vdash d, \overline{d} \leadsto \Gamma''}{
@@ -544,8 +547,8 @@ $$\infer{\Gamma \vdash \mt{con} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa
}$$
$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leadsto \Gamma'}{
- \Gamma \vdash M : S
- & \mt{proj}(M, S, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
& \overline{y}; x; \Gamma, x :: \mt{Type}^{\mt{len}(\overline y)} \to \mt{Type} = M.z \vdash \overline{dc} \leadsto \Gamma'
}$$
@@ -561,12 +564,12 @@ $$\infer{\Gamma \vdash \mt{val} \; \mt{rec} \; \overline{x : \tau = e} \leadsto
$$\infer{\Gamma \vdash \mt{structure} \; X : S = M \leadsto \Gamma, X : S}{
\Gamma \vdash M : S
}
-\quad \infer{\Gamma \vdash \mt{siganture} \; X = S \leadsto \Gamma, X = S}{
+\quad \infer{\Gamma \vdash \mt{signature} \; X = S \leadsto \Gamma, X = S}{
\Gamma \vdash S
}$$
-$$\infer{\Gamma \vdash \mt{open} \; M \leadsto \Gamma, \mathcal O(M, S)}{
- \Gamma \vdash M : S
+$$\infer{\Gamma \vdash \mt{open} \; M \leadsto \Gamma, \mathcal O(M, \overline{s})}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
}$$
$$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma}{
@@ -574,8 +577,8 @@ $$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma}{
& \Gamma \vdash c_2 :: \{\kappa\}
& \Gamma \vdash c_1 \sim c_2
}
-\quad \infer{\Gamma \vdash \mt{open} \; \mt{constraints} \; M \leadsto \Gamma, \mathcal O_c(M, S)}{
- \Gamma \vdash M : S
+\quad \infer{\Gamma \vdash \mt{open} \; \mt{constraints} \; M \leadsto \Gamma, \mathcal O_c(M, \overline{s})}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
}$$
$$\infer{\Gamma \vdash \mt{table} \; x : c \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_table} \; c}{
@@ -599,8 +602,62 @@ $$\infer{\overline{y}; x; \Gamma \vdash \cdot \leadsto \Gamma}{}
\overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'
}$$
+\subsection{Signature Item Typing}
+
+We appeal to a signature item analogue of the $\mathcal O$ function from the last subsection.
+
+$$\infer{\Gamma \vdash \cdot \leadsto \Gamma}{}
+\quad \infer{\Gamma \vdash s, \overline{s} \leadsto \Gamma''}{
+ \Gamma \vdash s \leadsto \Gamma'
+ & \Gamma' \vdash \overline{s} \leadsto \Gamma''
+}$$
+
+$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa \leadsto \Gamma, x :: \kappa}{}
+\quad \infer{\Gamma \vdash \mt{con} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa = c}{
+ \Gamma \vdash c :: \kappa
+}
+\quad \infer{\Gamma \vdash \mt{datatype} \; x \; \overline{y} = \overline{dc} \leadsto \Gamma'}{
+ \overline{y}; x; \Gamma, x :: \mt{Type}^{\mt{len}(\overline y)} \to \mt{Type} \vdash \overline{dc} \leadsto \Gamma'
+}$$
+
+$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leadsto \Gamma'}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
+ & \overline{y}; x; \Gamma, x :: \mt{Type}^{\mt{len}(\overline y)} \to \mt{Type} = M.z \vdash \overline{dc} \leadsto \Gamma'
+}$$
+
+$$\infer{\Gamma \vdash \mt{val} \; x : \tau \leadsto \Gamma, x : \tau}{
+ \Gamma \vdash \tau :: \mt{Type}
+}$$
+
+$$\infer{\Gamma \vdash \mt{structure} \; X : S \leadsto \Gamma, X : S}{
+ \Gamma \vdash S
+}
+\quad \infer{\Gamma \vdash \mt{signature} \; X = S \leadsto \Gamma, X = S}{
+ \Gamma \vdash S
+}$$
+
+$$\infer{\Gamma \vdash \mt{include} \; S \leadsto \Gamma, \mathcal O(\overline{s})}{
+ \Gamma \vdash S
+ & \Gamma \vdash S \equiv \mt{sig} \; \overline{s} \; \mt{end}
+}$$
+
+$$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma, c_1 \sim c_2}{
+ \Gamma \vdash c_1 :: \{\kappa\}
+ & \Gamma \vdash c_2 :: \{\kappa\}
+}$$
+
+$$\infer{\Gamma \vdash \mt{class} \; x = c \leadsto \Gamma, x :: \mt{Type} \to \mt{Type} = c}{
+ \Gamma \vdash c :: \mt{Type} \to \mt{Type}
+}
+\quad \infer{\Gamma \vdash \mt{class} \; x \leadsto \Gamma, x :: \mt{Type} \to \mt{Type}}{}$$
+
\subsection{Signature Compatibility}
+To simplify the judgments in this section, we assume that all signatures are alpha-varied as necessary to avoid including multiple bindings for the same identifier. This is in addition to the usual alpha-variation of locally-bound variables.
+
+We rely on a judgment $\Gamma \vdash \overline{s} \leq s'$, which expresses the occurrence in signature items $\overline{s}$ of an item compatible with $s'$. We also use a judgment $\Gamma \vdash \overline{dc} \leq \overline{dc}$, which expresses compatibility of datatype definitions.
+
$$\infer{\Gamma \vdash S \equiv S}{}
\quad \infer{\Gamma \vdash S_1 \equiv S_2}{
\Gamma \vdash S_2 \equiv S_1
@@ -609,22 +666,34 @@ $$\infer{\Gamma \vdash S \equiv S}{}
X = S \in \Gamma
}
\quad \infer{\Gamma \vdash M.X \equiv S}{
- \Gamma \vdash M : S'
- & \mt{proj}(M, S', \mt{signature} \; X) = S
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{signature} \; X) = S
}$$
$$\infer{\Gamma \vdash S \; \mt{where} \; \mt{con} \; x = c \equiv \mt{sig} \; \overline{s^1} \; \mt{con} \; x :: \kappa = c \; \overline{s^2} \; \mt{end}}{
\Gamma \vdash S \equiv \mt{sig} \; \overline{s^1} \; \mt{con} \; x :: \kappa \; \overline{s^2} \; \mt{end}
& \Gamma \vdash c :: \kappa
+}
+\quad \infer{\Gamma \vdash \mt{sig} \; \overline{s^1} \; \mt{include} \; S \; \overline{s^2} \; \mt{end} \equiv \mt{sig} \; \overline{s^1} \; \overline{s} \; \overline{s^2} \; \mt{end}}{
+ \Gamma \vdash S \equiv \mt{sig} \; \overline{s} \; \mt{end}
}$$
$$\infer{\Gamma \vdash S_1 \leq S_2}{
\Gamma \vdash S_1 \equiv S_2
}
\quad \infer{\Gamma \vdash \mt{sig} \; \overline{s} \; \mt{end} \leq \mt{sig} \; \mt{end}}{}
-\quad \infer{\Gamma \vdash \mt{sig} \; \overline{s^1} \; s \; \overline{s^2} \; \mt{end} \leq \mt{sig} \; s' \; \overline{s} \; \mt{end}}{
- \Gamma \vdash s \leq s'; \Gamma'
- & \Gamma' \vdash \mt{sig} \; \overline{s^1} \; s \; \overline{s^2} \; \mt{end} \leq \mt{sig} \; \overline{s} \; \mt{end}
+\quad \infer{\Gamma \vdash \mt{sig} \; \overline{s} \; \mt{end} \leq \mt{sig} \; s' \; \overline{s'} \; \mt{end}}{
+ \Gamma \vdash \overline{s} \leq s'
+ & \Gamma \vdash s' \leadsto \Gamma'
+ & \Gamma' \vdash \mt{sig} \; \overline{s} \; \mt{end} \leq \mt{sig} \; \overline{s'} \; \mt{end}
+}$$
+
+$$\infer{\Gamma \vdash s \; \overline{s} \leq s'}{
+ \Gamma \vdash s \leq s'
+}
+\quad \infer{\Gamma \vdash s \; \overline{s} \leq s'}{
+ \Gamma \vdash s \leadsto \Gamma'
+ & \Gamma' \vdash \overline{s} \leq s'
}$$
$$\infer{\Gamma \vdash \mt{functor} (X : S_1) : S_2 \leq \mt{functor} (X : S'_1) : S'_2}{
@@ -632,4 +701,68 @@ $$\infer{\Gamma \vdash \mt{functor} (X : S_1) : S_2 \leq \mt{functor} (X : S'_1)
& \Gamma, X : S'_1 \vdash S_2 \leq S'_2
}$$
+$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa \leq \mt{con} \; x :: \kappa}{}
+\quad \infer{\Gamma \vdash \mt{con} \; x :: \kappa = c \leq \mt{con} \; x :: \kappa}{}
+\quad \infer{\Gamma \vdash \mt{datatype} \; x \; \overline{y} = \overline{dc} \leq \mt{con} \; x :: \mt{Type}}{}$$
+
+$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leq \mt{con} \; x :: \mt{Type}^{\mt{len}(\overline{y})} \to \mt{Type}}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
+}$$
+
+$$\infer{\Gamma \vdash \mt{class} \; x \leq \mt{con} \; x :: \mt{Type} \to \mt{Type}}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x = c \leq \mt{con} \; x :: \mt{Type} \to \mt{Type}}{}$$
+
+$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa = c_1 \leq \mt{con} \; x :: \kappa = c_2}{
+ \Gamma \vdash c_1 \equiv c_2
+}
+\quad \infer{\Gamma \vdash \mt{class} \; x = c_1 \leq \mt{con} \; x :: \mt{Type} \to \mt{Type} = c_2}{
+ \Gamma \vdash c_1 \equiv c_2
+}$$
+
+$$\infer{\Gamma \vdash \mt{datatype} \; x \; \overline{y} = \overline{dc} \leq \mt{datatype} \; x \; \overline{y} = \overline{dc'}}{
+ \Gamma, \overline{y :: \mt{Type}} \vdash \overline{dc} \leq \overline{dc'}
+}$$
+
+$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leq \mt{datatype} \; x \; \overline{y} = \overline{dc'}}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
+ & \Gamma, \overline{y :: \mt{Type}} \vdash \overline{dc} \leq \overline{dc'}
+}$$
+
+$$\infer{\Gamma \vdash \cdot \leq \cdot}{}
+\quad \infer{\Gamma \vdash X; \overline{dc} \leq X; \overline{dc'}}{
+ \Gamma \vdash \overline{dc} \leq \overline{dc'}
+}
+\quad \infer{\Gamma \vdash X \; \mt{of} \; \tau_1; \overline{dc} \leq X \; \mt{of} \; \tau_2; \overline{dc'}}{
+ \Gamma \vdash \tau_1 \equiv \tau_2
+ & \Gamma \vdash \overline{dc} \leq \overline{dc'}
+}$$
+
+$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leq \mt{datatype} \; x = \mt{datatype} \; M'.z'}{
+ \Gamma \vdash M.z \equiv M'.z'
+}$$
+
+$$\infer{\Gamma \vdash \mt{val} \; x : \tau_1 \leq \mt{val} \; x : \tau_2}{
+ \Gamma \vdash \tau_1 \equiv \tau_2
+}
+\quad \infer{\Gamma \vdash \mt{structure} \; X : S_1 \leq \mt{structure} \; X : S_2}{
+ \Gamma \vdash S_1 \leq S_2
+}
+\quad \infer{\Gamma \vdash \mt{signature} \; X = S_1 \leq \mt{signature} \; X = S_2}{
+ \Gamma \vdash S_1 \leq S_2
+ & \Gamma \vdash S_2 \leq S_1
+}$$
+
+$$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leq \mt{constraint} \; c'_1 \sim c'_2}{
+ \Gamma \vdash c_1 \equiv c'_1
+ & \Gamma \vdash c_2 \equiv c'_2
+}$$
+
+$$\infer{\Gamma \vdash \mt{class} \; x \leq \mt{class} \; x}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x = c \leq \mt{class} \; x}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x = c_1 \leq \mt{class} \; x = c_2}{
+ \Gamma \vdash c_1 \equiv c_2
+}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 2cf99ae8367d64360d18f7e838f905419f4c80ef Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 14:09:43 -0500
Subject: Module typing
---
doc/manual.tex | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 50 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index ed41acaa..53a2b787 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -765,4 +765,54 @@ $$\infer{\Gamma \vdash \mt{class} \; x \leq \mt{class} \; x}{}
\Gamma \vdash c_1 \equiv c_2
}$$
+\subsection{Module Typing}
+
+We use a helper function $\mt{sigOf}$, which converts declarations and sequences of declarations into their principal signature items and sequences of signature items, respectively.
+
+$$\infer{\Gamma \vdash M : S}{
+ \Gamma \vdash M : S'
+ & \Gamma \vdash S' \leq S
+}
+\quad \infer{\Gamma \vdash \mt{struct} \; \overline{d} \; \mt{end} : \mt{sig} \; \mt{sigOf}(\overline{d}) \; \mt{end}}{
+ \Gamma \vdash \overline{d} \leadsto \Gamma'
+}
+\quad \infer{\Gamma \vdash X : S}{
+ X : S \in \Gamma
+}$$
+
+$$\infer{\Gamma \vdash M.X : S}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
+ & \mt{proj}(M, \overline{s}, \mt{structure} \; X) = S
+}$$
+
+$$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
+ \Gamma \vdash M_1 : \mt{functor}(X : S_1) : S_2
+ & \Gamma \vdash M_2 : S_1
+}
+\quad \infer{\Gamma \vdash \mt{functor} (X : S_1) : S_2 = M : \mt{functor} (X : S_1) : S_2}{
+ \Gamma \vdash S_1
+ & \Gamma, X : S_1 \vdash S_2
+ & \Gamma, X : S_1 \vdash M : S_2
+}$$
+
+\begin{eqnarray*}
+ \mt{sigOf}(\cdot) &=& \cdot \\
+  \mt{sigOf}(d \; \overline{d'}) &=& \mt{sigOf}(d) \; \mt{sigOf}(\overline{d'}) \\
+ \\
+ \mt{sigOf}(\mt{con} \; x :: \kappa = c) &=& \mt{con} \; x :: \kappa = c \\
+ \mt{sigOf}(\mt{datatype} \; x \; \overline{y} = \overline{dc}) &=& \mt{datatype} \; x \; \overline{y} = \overline{dc} \\
+ \mt{sigOf}(\mt{datatype} \; x = \mt{datatype} \; M.z) &=& \mt{datatype} \; x = \mt{datatype} \; M.z \\
+ \mt{sigOf}(\mt{val} \; x : \tau = e) &=& \mt{val} \; x : \tau \\
+ \mt{sigOf}(\mt{val} \; \mt{rec} \; \overline{x : \tau = e}) &=& \overline{\mt{val} \; x : \tau} \\
+ \mt{sigOf}(\mt{structure} \; X : S = M) &=& \mt{structure} \; X : S \\
+ \mt{sigOf}(\mt{signature} \; X = S) &=& \mt{signature} \; X = S \\
+ \mt{sigOf}(\mt{open} \; M) &=& \mt{include} \; S \textrm{ (where $\Gamma \vdash M : S$)} \\
+ \mt{sigOf}(\mt{constraint} \; c_1 \sim c_2) &=& \mt{constraint} \; c_1 \sim c_2 \\
+ \mt{sigOf}(\mt{open} \; \mt{constraints} \; M) &=& \cdot \\
+ \mt{sigOf}(\mt{table} \; x : c) &=& \mt{table} \; x : c \\
+ \mt{sigOf}(\mt{sequence} \; x) &=& \mt{sequence} \; x \\
+ \mt{sigOf}(\mt{cookie} \; x : \tau) &=& \mt{cookie} \; x : \tau \\
+ \mt{sigOf}(\mt{class} \; x = c) &=& \mt{class} \; x = c \\
+\end{eqnarray*}
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From bcddef561bc3d980de9cbec25605accb2334c115 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 14:32:33 -0500
Subject: selfify
---
doc/manual.tex | 25 ++++++++++++++++++++++++-
1 file changed, 24 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 53a2b787..eac33bc6 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -249,6 +249,7 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\Gamma \vdash S \leq S$ is the signature compatibility judgment. We write $\Gamma \vdash S$ as shorthand for $\Gamma \vdash S \leq S$.
\item $\Gamma \vdash M : S$ is the module signature checking judgment.
\item $\mt{proj}(M, \overline{s}, V)$ is a partial function for projecting a signature item from $\overline{s}$, given the module $M$ that we project from. $V$ may be $\mt{con} \; x$, $\mt{datatype} \; x$, $\mt{val} \; x$, $\mt{signature} \; X$, or $\mt{structure} \; X$. The parameter $M$ is needed because the projected signature item may refer to other items from $\overline{s}$.
+\item $\mt{selfify}(M, \overline{s})$ adds information to signature items $\overline{s}$ to reflect the fact that we are concerned with the particular module $M$. This function is overloaded to work over individual signature items as well.
\end{itemize}
\subsection{Kinding}
@@ -563,8 +564,13 @@ $$\infer{\Gamma \vdash \mt{val} \; \mt{rec} \; \overline{x : \tau = e} \leadsto
$$\infer{\Gamma \vdash \mt{structure} \; X : S = M \leadsto \Gamma, X : S}{
\Gamma \vdash M : S
+ & \textrm{ ($M$ not a $\mt{struct} \; \ldots \; \mt{end}$)}
}
-\quad \infer{\Gamma \vdash \mt{signature} \; X = S \leadsto \Gamma, X = S}{
+\quad \infer{\Gamma \vdash \mt{structure} \; X : S = \mt{struct} \; \overline{d} \; \mt{end} \leadsto \Gamma, X : \mt{selfify}(X, \overline{s})}{
+ \Gamma \vdash \mt{struct} \; \overline{d} \; \mt{end} : \mt{sig} \; \overline{s} \; \mt{end}
+}$$
+
+$$\infer{\Gamma \vdash \mt{signature} \; X = S \leadsto \Gamma, X = S}{
\Gamma \vdash S
}$$
@@ -815,4 +821,21 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{sigOf}(\mt{class} \; x = c) &=& \mt{class} \; x = c \\
\end{eqnarray*}
+\begin{eqnarray*}
+ \mt{selfify}(M, \cdot) &=& \cdot \\
+  \mt{selfify}(M, s \; \overline{s'}) &=& \mt{selfify}(M, s) \; \mt{selfify}(M, \overline{s'}) \\
+ \\
+ \mt{selfify}(M, \mt{con} \; x :: \kappa) &=& \mt{con} \; x :: \kappa = M.x \\
+ \mt{selfify}(M, \mt{con} \; x :: \kappa = c) &=& \mt{con} \; x :: \kappa = c \\
+ \mt{selfify}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc}) &=& \mt{datatype} \; x \; \overline{y} = \mt{datatype} \; M.x \\
+ \mt{selfify}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z) &=& \mt{datatype} \; x = \mt{datatype} \; M'.z \\
+ \mt{selfify}(M, \mt{val} \; x : \tau) &=& \mt{val} \; x : \tau \\
+ \mt{selfify}(M, \mt{structure} \; X : S) &=& \mt{structure} \; X : \mt{selfify}(M.X, \overline{s}) \textrm{ (where $\Gamma \vdash S \equiv \mt{sig} \; \overline{s} \; \mt{end}$)} \\
+ \mt{selfify}(M, \mt{signature} \; X = S) &=& \mt{signature} \; X = S \\
+ \mt{selfify}(M, \mt{include} \; S) &=& \mt{include} \; S \\
+ \mt{selfify}(M, \mt{constraint} \; c_1 \sim c_2) &=& \mt{constraint} \; c_1 \sim c_2 \\
+ \mt{selfify}(M, \mt{class} \; x) &=& \mt{class} \; x = M.x \\
+ \mt{selfify}(M, \mt{class} \; x = c) &=& \mt{class} \; x = c \\
+\end{eqnarray*}
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From bd43499d17cec3123d5462233ea487b41e77a80f Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 29 Nov 2008 15:04:57 -0500
Subject: Module projection
---
.hgignore | 1 +
doc/manual.tex | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 42 insertions(+)
(limited to 'doc')
diff --git a/.hgignore b/.hgignore
index fe5b6659..4e578224 100644
--- a/.hgignore
+++ b/.hgignore
@@ -32,3 +32,4 @@ demo/demo.*
*.dvi
*.pdf
*.ps
+*.toc
diff --git a/doc/manual.tex b/doc/manual.tex
index eac33bc6..713bbe60 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -15,6 +15,8 @@
\maketitle
+\tableofcontents
+
\section{Ur Syntax}
In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for tables, sequences, and cookies.
@@ -838,4 +840,43 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{selfify}(M, \mt{class} \; x = c) &=& \mt{class} \; x = c \\
\end{eqnarray*}
+\subsection{Module Projection}
+
+\begin{eqnarray*}
+ \mt{proj}(M, \mt{con} \; x :: \kappa \; \overline{s}, \mt{con} \; x) &=& \kappa \\
+ \mt{proj}(M, \mt{con} \; x :: \kappa = c \; \overline{s}, \mt{con} \; x) &=& (\kappa, c) \\
+ \mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, \mt{con} \; x) &=& \mt{Type}^{\mt{len}(\overline{y})} \to \mt{Type} \\
+ \mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, \mt{con} \; x) &=& (\mt{Type}^{\mt{len}(\overline{y})} \to \mt{Type}, M'.z) \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$} \\
+ && \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})$)} \\
+ \mt{proj}(M, \mt{class} \; x \; \overline{s}, \mt{con} \; x) &=& \mt{Type} \to \mt{Type} \\
+ \mt{proj}(M, \mt{class} \; x = c \; \overline{s}, \mt{con} \; x) &=& (\mt{Type} \to \mt{Type}, c) \\
+ \\
+ \mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, \mt{datatype} \; x) &=& (\overline{y}, \overline{dc}) \\
+  \mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, \mt{datatype} \; x) &=& \mt{proj}(M', \overline{s'}, \mt{datatype} \; z) \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$)} \\
+ \\
+ \mt{proj}(M, \mt{val} \; x : \tau \; \overline{s}, \mt{val} \; x) &=& \tau \\
+ \mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, \mt{val} \; X) &=& \overline{y ::: \mt{Type}} \to M.x \; \overline y \textrm{ (where $X \in \overline{dc}$)} \\
+ \mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, \mt{val} \; X) &=& \overline{y ::: \mt{Type}} \to \tau \to M.x \; \overline y \textrm{ (where $X \; \mt{of} \; \tau \in \overline{dc}$)} \\
+  \mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, \mt{val} \; X) &=& \overline{y ::: \mt{Type}} \to M.x \; \overline y \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$} \\
+  && \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})$ and $X \in \overline{dc}$)} \\
+  \mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, \mt{val} \; X) &=& \overline{y ::: \mt{Type}} \to \tau \to M.x \; \overline y \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$} \\
+  && \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})$ and $X \; \mt{of} \; \tau \in \overline{dc}$)} \\
+ \\
+ \mt{proj}(M, \mt{structure} \; X : S \; \overline{s}, \mt{structure} \; X) &=& S \\
+ \\
+ \mt{proj}(M, \mt{signature} \; X = S \; \overline{s}, \mt{signature} \; X) &=& S \\
+ \\
+ \mt{proj}(M, \mt{con} \; x :: \kappa \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{con} \; x :: \kappa = c \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{val} \; x : \tau \; \overline{s}, V) &=& \mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{structure} \; X : S \; \overline{s}, V) &=& [X \mapsto M.X]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{signature} \; X = S \; \overline{s}, V) &=& [X \mapsto M.X]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{include} \; S \; \overline{s}, V) &=& \mt{proj}(M, \overline{s'} \; \overline{s}, V) \textrm{ (where $\Gamma \vdash S \equiv \mt{sig} \; \overline{s'} \; \mt{end}$)} \\
+ \mt{proj}(M, \mt{constraint} \; c_1 \sim c_2 \; \overline{s}, V) &=& \mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{class} \; x \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{class} \; x = c \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+\end{eqnarray*}
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 154fd594ccb664b07b27d63bd9ffee41801dd4d1 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 6 Dec 2008 12:01:12 -0500
Subject: Type inference
---
doc/manual.tex | 40 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 38 insertions(+), 2 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 713bbe60..8ef6a889 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -358,7 +358,7 @@ $$\infer{\Gamma \vdash c \hookrightarrow \{c\}}{}
\Gamma \vdash c \hookrightarrow C
}$$
-\subsection{Definitional Equality}
+\subsection{\label{definitional}Definitional Equality}
We use $\mathcal C$ to stand for a one-hole context that, when filled, yields a constructor. The notation $\mathcal C[c]$ plugs $c$ into $\mathcal C$. We omit the standard definition of one-hole contexts. We write $[x \mapsto c_1]c_2$ for capture-avoiding substitution of $c_1$ for $x$ in $c_2$.
@@ -530,7 +530,7 @@ $$\infer{\Gamma \vdash \{\overline{x = p}\} \leadsto \Gamma_n; \{\overline{x = \
We use an auxiliary judgment $\overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'$, expressing the enrichment of $\Gamma$ with the types of the datatype constructors $\overline{dc}$, when they are known to belong to datatype $x$ with type parameters $\overline{y}$.
-This is the first judgment where we deal with type classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. In the compiler, a set of available type classes and their instances is maintained, and these instances are used to fill in expression wildcards.
+This is the first judgment where we deal with type classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. Section \ref{typeclasses} gives an informal description of how type classes influence type inference.
We presuppose the existence of a function $\mathcal O$, where $\mathcal O(M, \overline{s})$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature items $\overline{s}$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $S$.
@@ -879,4 +879,40 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{proj}(M, \mt{class} \; x = c \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
\end{eqnarray*}
+
+\section{Type Inference}
+
+The Ur/Web compiler uses \emph{heuristic type inference}, with no claims of completeness with respect to the declarative specification of the last section. The rules in use seem to work well in practice. This section summarizes those rules, to help Ur programmers predict what will work and what won't.
+
+\subsection{Basic Unification}
+
+Type-checkers for languages based on the Hindley-Milner type discipline, like ML and Haskell, take advantage of \emph{principal typing} properties, making complete type inference relatively straightforward. Inference algorithms are traditionally implemented using type unification variables, at various points asserting equalities between types, in the process discovering the values of type variables. The Ur/Web compiler uses the same basic strategy, but the complexity of the type system rules out easy completeness.
+
+Type-checking can require evaluating recursive functional programs, thanks to the type-level $\mt{fold}$ operator. When a unification variable appears in such a type, the next step of computation can be undetermined. The value of that variable might be determined later, but this would be ``too late'' for the unification problems generated at the first occurrence. This is the essential source of incompleteness.
+
+Nonetheless, the unification engine tends to do reasonably well. Unlike in ML, polymorphism is never inferred in definitions; it must be indicated explicitly by writing out constructor-level parameters. By writing these and other annotations, the programmer can generally get the type inference engine to do most of the type reconstruction work.
+
+\subsection{Unifying Record Types}
+
+The type inference engine tries to take advantage of the algebraic rules governing type-level records, as shown in Section \ref{definitional}. When two constructors of record kind are unified, they are reduced to normal forms, with like terms crossed off from each normal form until, hopefully, nothing remains. This cannot be complete, with the inclusion of unification variables. The type-checker can help you understand what goes wrong when the process fails, as it outputs the unmatched remainders of the two normal forms.
+
+\subsection{\label{typeclasses}Type Classes}
+
+Ur includes a type class facility inspired by Haskell's. The current version is very rudimentary, only supporting instances for particular types built up from abstract types and datatypes and type-level application.
+
+Type classes are integrated with the module system. A type class is just a constructor of kind $\mt{Type} \to \mt{Type}$. By marking such a constructor $c$ as a type class, the programmer instructs the type inference engine to, in each scope, record all values of types $c \; \tau$ as \emph{instances}. Any function argument whose type is of such a form is treated as implicit, to be determined by examining the current instance database.
+
+The ``dictionary encoding'' often used in Haskell implementations is made explicit in Ur. Type class instances are just properly-typed values, and they can also be considered as ``proofs'' of membership in the class. In some cases, it is useful to pass these proofs around explicitly. An underscore written where a proof is expected will also be inferred, if possible, from the current instance database.
+
+Just as for types, type classes may be exported from modules, and they may be exported as concrete or abstract. Concrete type classes have their ``real'' definitions exposed, so that client code may add new instances freely. Abstract type classes are useful as ``predicates'' that can be used to enforce invariants, as we will see in some definitions of SQL syntax in the Ur/Web standard library.
+
+\subsection{Reverse-Engineering Record Types}
+
+It's useful to write Ur functions and functors that take record constructors as inputs, but these constructors can grow quite long, even though their values are often implied by other arguments. The compiler uses a simple heuristic to infer the values of unification variables that are folded over, yielding known results. Often, as in the case of $\mt{map}$-like folds, the base and recursive cases of a fold produce constructors with different top-level structure. Thus, if the result of the fold is known, examining its top-level structure reveals whether the record being folded over is empty or not. If it's empty, we're done; if it's not empty, we replace a single unification variable with a new constructor formed from three new unification variables, as in $[\alpha = \beta] \rc \gamma$. This process can often be repeated to determine a unification variable fully.
+
+\subsection{Implicit Arguments in Functor Applications}
+
+Constructor, constraint, and type class witness members of structures may be omitted, when those structures are used in contexts where their assigned signatures imply how to fill in those missing members. This feature combines well with reverse-engineering to allow for uses of complicated meta-programming functors with little more code than would be necessary to invoke an untyped, ad-hoc code generator.
+
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 718a8e9fc7cd60f227f56e6031c7e9ac054cb488 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 6 Dec 2008 13:04:48 -0500
Subject: Start of Ur/Web library
---
doc/manual.tex | 47 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 47 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 8ef6a889..894287e1 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -915,4 +915,51 @@ It's useful to write Ur functions and functors that take record constructors as
Constructor, constraint, and type class witness members of structures may be omitted, when those structures are used in contexts where their assigned signatures imply how to fill in those missing members. This feature combines well with reverse-engineering to allow for uses of complicated meta-programming functors with little more code than would be necessary to invoke an untyped, ad-hoc code generator.
+\section{The Ur Standard Library}
+
+The built-in parts of the Ur/Web standard library are described by the signature in \texttt{lib/basis.urs} in the distribution. A module $\mt{Basis}$ ascribing to that signature is available in the initial environment, and every program is implicitly prefixed by $\mt{open} \; \mt{Basis}$.
+
+Additionally, other common functions that are definable within Ur are included in \texttt{lib/top.urs} and \texttt{lib/top.ur}. This $\mt{Top}$ module is also opened implicitly.
+
+The idea behind Ur is to serve as the ideal host for embedded domain-specific languages. For now, however, the ``generic'' functionality is intermixed with Ur/Web-specific functionality, including in these two library modules. We hope that these generic library components have types that speak for themselves. The next section introduces the Ur/Web-specific elements. Here, we only give the type declarations from the beginning of $\mt{Basis}$.
+
+$$\begin{array}{l}
+ \mt{type} \; \mt{int} \\
+ \mt{type} \; \mt{float} \\
+ \mt{type} \; \mt{string} \\
+ \mt{type} \; \mt{time} \\
+ \\
+ \mt{type} \; \mt{unit} = \{\} \\
+ \\
+ \mt{datatype} \; \mt{bool} = \mt{False} \mid \mt{True} \\
+ \\
+ \mt{datatype} \; \mt{option} \; \mt{t} = \mt{None} \mid \mt{Some} \; \mt{of} \; \mt{t}
+\end{array}$$
+
+
+\section{The Ur/Web Standard Library}
+
+\subsection{Transactions}
+
+Ur is a pure language; we use Haskell's trick to support controlled side effects. The standard library defines a monad $\mt{transaction}$, meant to stand for actions that may be undone cleanly. By design, no other kinds of actions are supported.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{transaction} :: \mt{Type} \to \mt{Type} \\
+ \\
+ \mt{val} \; \mt{return} : \mt{t} ::: \mt{Type} \to \mt{t} \to \mt{transaction} \; \mt{t} \\
+ \mt{val} \; \mt{bind} : \mt{t_1} ::: \mt{Type} \to \mt{t_2} ::: \mt{Type} \to \mt{transaction} \; \mt{t_1} \to (\mt{t_1} \to \mt{transaction} \; \mt{t_2}) \to \mt{transaction} \; \mt{t_2}
+\end{array}$$
+
+\subsection{HTTP}
+
+There are transactions for reading an HTTP header by name and for getting and setting strongly-typed cookies. Cookies may only be created by the $\mt{cookie}$ declaration form, ensuring that they be named consistently based on module structure.
+
+$$\begin{array}{l}
+\mt{val} \; \mt{requestHeader} : \mt{string} \to \mt{transaction} \; (\mt{option} \; \mt{string}) \\
+\\
+\mt{con} \; \mt{http\_cookie} :: \mt{Type} \to \mt{Type} \\
+\mt{val} \; \mt{getCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{transaction} \; (\mt{option} \; \mt{t}) \\
+\mt{val} \; \mt{setCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 80bbc587e8c3e897cb30f0723187950254c6632b Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 09:19:53 -0500
Subject: Start of sql_exp
---
doc/manual.tex | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 85 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 894287e1..0a0bdc88 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -962,4 +962,89 @@ $$\begin{array}{l}
\mt{val} \; \mt{setCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit}
\end{array}$$
+\subsection{SQL}
+
+The fundamental unit of interest in the embedding of SQL is tables, described by a type family and creatable only via the $\mt{table}$ declaration form.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_table} :: \{\mt{Type}\} \to \mt{Type}
+\end{array}$$
+
+\subsubsection{Queries}
+
+A final query is constructed via the $\mt{sql\_query}$ function. Constructor arguments respectively specify the table fields we select (as records mapping tables to the subsets of their fields that we choose) and the (always named) extra expressions that we select.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_query} :: \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_query} : \mt{tables} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{selectedFields} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{selectedExps} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \{\mt{Rows} : \mt{sql\_query1} \; \mt{tables} \; \mt{selectedFields} \; \mt{selectedExps}, \\
+ \hspace{.2in} \mt{OrderBy} : \mt{sql\_order\_by} \; \mt{tables} \; \mt{selectedExps}, \\
+ \hspace{.2in} \mt{Limit} : \mt{sql\_limit}, \\
+ \hspace{.2in} \mt{Offset} : \mt{sql\_offset}\} \\
+ \hspace{.1in} \to \mt{sql\_query} \; \mt{selectedFields} \; \mt{selectedExps}
+\end{array}$$
+
+Most of the complexity of the query encoding is in the type $\mt{sql\_query1}$, which includes simple queries and derived queries based on relational operators. Constructor arguments respectively specify the tables we select from, the subset of fields that we keep from each table for the result rows, and the extra expressions that we select.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_query1} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \\
+ \\
+ \mt{type} \; \mt{sql\_relop} \\
+ \mt{val} \; \mt{sql\_union} : \mt{sql\_relop} \\
+ \mt{val} \; \mt{sql\_intersect} : \mt{sql\_relop} \\
+ \mt{val} \; \mt{sql\_except} : \mt{sql\_relop} \\
+ \mt{val} \; \mt{sql\_relop} : \mt{tables1} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{tables2} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{selectedFields} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{selectedExps} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \mt{sql\_relop} \\
+ \hspace{.1in} \to \mt{sql\_query1} \; \mt{tables1} \; \mt{selectedFields} \; \mt{selectedExps} \\
+ \hspace{.1in} \to \mt{sql\_query1} \; \mt{tables2} \; \mt{selectedFields} \; \mt{selectedExps} \\
+ \hspace{.1in} \to \mt{sql\_query1} \; \mt{selectedFields} \; \mt{selectedFields} \; \mt{selectedExps}
+\end{array}$$
+
+$$\begin{array}{l}
+ \mt{val} \; \mt{sql\_query1} : \mt{tables} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{grouped} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{selectedFields} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{selectedExps} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \{\mt{From} : \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: \{\mt{Type}\}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_table} \; \mt{fields}] \rc \mt{acc}) \; [] \; \mt{tables}), \\
+ \hspace{.2in} \mt{Where} : \mt{sql\_exp} \; \mt{tables} \; [] \; [] \; \mt{bool}, \\
+ \hspace{.2in} \mt{GroupBy} : \mt{sql\_subset} \; \mt{tables} \; \mt{grouped}, \\
+ \hspace{.2in} \mt{Having} : \mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; [] \; \mt{bool}, \\
+ \hspace{.2in} \mt{SelectFields} : \mt{sql\_subset} \; \mt{grouped} \; \mt{selectedFields}, \\
+ \hspace{.2in} \mt {SelectExps} : \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{t} :: \mt{Type}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; [] \; \mt{t}] \rc \mt{acc}) \; [] \; \mt{selectedExps}) \} \\
+ \hspace{.1in} \to \mt{sql\_query1} \; \mt{tables} \; \mt{selectedFields} \; \mt{selectedExps}
+\end{array}$$
+
+To encode projection of subsets of fields in $\mt{SELECT}$ clauses, and to encode $\mt{GROUP} \; \mt{BY}$ clauses, we rely on a type family $\mt{sql\_subset}$, capturing what it means for one record of table fields to be a subset of another. The main constructor $\mt{sql\_subset}$ ``proves subset facts'' by requiring a split of a record into kept and dropped parts. The extra constructor $\mt{sql\_subset\_all}$ is a convenience for keeping all fields of a record.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_subset} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_subset} : \mt{keep\_drop} :: \{(\{\mt{Type}\} \times \{\mt{Type}\})\} \\
+ \hspace{.1in} \to \mt{sql\_subset} \\
+ \hspace{.2in} (\mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: (\{\mt{Type}\} \times \{\mt{Type}\})) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \; [\mt{fields}.1 \sim \mt{fields}.2] \Rightarrow \\
+ \hspace{.3in} [\mt{nm} = \mt{fields}.1 \rc \mt{fields}.2] \rc \mt{acc}) \; [] \; \mt{keep\_drop}) \\
+ \hspace{.2in} (\mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: (\{\mt{Type}\} \times \{\mt{Type}\})) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{fields}.1] \rc \mt{acc}) \; [] \; \mt{keep\_drop}) \\
+\mt{val} \; \mt{sql\_subset\_all} : \mt{tables} :: \{\{\mt{Type}\}\} \to \mt{sql\_subset} \; \mt{tables} \; \mt{tables}
+\end{array}$$
+
+SQL expressions are used in several places, including $\mt{SELECT}$, $\mt{WHERE}$, $\mt{HAVING}$, and $\mt{ORDER} \; \mt{BY}$ clauses. They reify a fragment of the standard SQL expression language, while making it possible to inject ``native'' Ur values in some places. The arguments to the $\mt{sql\_exp}$ type family respectively give the unrestricted-availability table fields, the table fields that may only be used in arguments to aggregate functions, the available selected expressions, and the type of the expression.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_exp} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \to \mt{Type}
+\end{array}$$
+
+Any field in scope may be converted to an expression.
+
+$$\begin{array}{l}
+ \mt{val} \; \mt{sql\_field} : \mt{otherTabs} ::: \{\{\mt{Type}\}\} \to \mt{otherFields} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \mt{fieldType} ::: \mt{Type} \to \mt{agg} ::: \{\{\mt{Type}\}\} \\
+ \hspace{.1in} \to \mt{exps} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \mt{tab} :: \mt{Name} \to \mt{field} :: \mt{Name} \\
+ \hspace{.1in} \to \mt{sql\_exp} \; ([\mt{tab} = [\mt{field} = \mt{fieldType}] \rc \mt{otherFields}] \rc \mt{otherTabs}) \; \mt{agg} \; \mt{exps} \; \mt{fieldType}
+\end{array}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 6da109f29357054c27022d363819edd5da94206c Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 10:02:04 -0500
Subject: Finish documenting queries; remove a stray [unit] argument
---
doc/manual.tex | 122 +++++++++++++++++++++++++++++++++++++++++++++++++++-----
lib/basis.urs | 2 +-
src/monoize.sml | 3 +-
src/urweb.grm | 3 +-
4 files changed, 116 insertions(+), 14 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 0a0bdc88..fb6b3b01 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -922,7 +922,6 @@ The built-in parts of the Ur/Web standard library are described by the signature
Additionally, other common functions that are definable within Ur are included in \texttt{lib/top.urs} and \texttt{lib/top.ur}. This $\mt{Top}$ module is also opened implicitly.
The idea behind Ur is to serve as the ideal host for embedded domain-specific languages. For now, however, the ``generic'' functionality is intermixed with Ur/Web-specific functionality, including in these two library modules. We hope that these generic library components have types that speak for themselves. The next section introduces the Ur/Web-specific elements. Here, we only give the type declarations from the beginning of $\mt{Basis}$.
-
$$\begin{array}{l}
\mt{type} \; \mt{int} \\
\mt{type} \; \mt{float} \\
@@ -942,7 +941,6 @@ $$\begin{array}{l}
\subsection{Transactions}
Ur is a pure language; we use Haskell's trick to support controlled side effects. The standard library defines a monad $\mt{transaction}$, meant to stand for actions that may be undone cleanly. By design, no other kinds of actions are supported.
-
$$\begin{array}{l}
\mt{con} \; \mt{transaction} :: \mt{Type} \to \mt{Type} \\
\\
@@ -953,7 +951,6 @@ $$\begin{array}{l}
\subsection{HTTP}
There are transactions for reading an HTTP header by name and for getting and setting strongly-typed cookies. Cookies may only be created by the $\mt{cookie}$ declaration form, ensuring that they be named consistently based on module structure.
-
$$\begin{array}{l}
\mt{val} \; \mt{requestHeader} : \mt{string} \to \mt{transaction} \; (\mt{option} \; \mt{string}) \\
\\
@@ -965,7 +962,6 @@ $$\begin{array}{l}
\subsection{SQL}
The fundamental unit of interest in the embedding of SQL is tables, described by a type family and creatable only via the $\mt{table}$ declaration form.
-
$$\begin{array}{l}
\mt{con} \; \mt{sql\_table} :: \{\mt{Type}\} \to \mt{Type}
\end{array}$$
@@ -973,7 +969,6 @@ $$\begin{array}{l}
\subsubsection{Queries}
A final query is constructed via the $\mt{sql\_query}$ function. Constructor arguments respectively specify the table fields we select (as records mapping tables to the subsets of their fields that we choose) and the (always named) extra expressions that we select.
-
$$\begin{array}{l}
\mt{con} \; \mt{sql\_query} :: \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \\
\mt{val} \; \mt{sql\_query} : \mt{tables} ::: \{\{\mt{Type}\}\} \\
@@ -987,7 +982,6 @@ $$\begin{array}{l}
\end{array}$$
Most of the complexity of the query encoding is in the type $\mt{sql\_query1}$, which includes simple queries and derived queries based on relational operators. Constructor arguments respectively specify the tables we select from, the subset of fields that we keep from each table for the result rows, and the extra expressions that we select.
-
$$\begin{array}{l}
\mt{con} \; \mt{sql\_query1} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \\
\\
@@ -1020,7 +1014,6 @@ $$\begin{array}{l}
\end{array}$$
To encode projection of subsets of fields in $\mt{SELECT}$ clauses, and to encode $\mt{GROUP} \; \mt{BY}$ clauses, we rely on a type family $\mt{sql\_subset}$, capturing what it means for one record of table fields to be a subset of another. The main constructor $\mt{sql\_subset}$ ``proves subset facts'' by requiring a split of a record into kept and dropped parts. The extra constructor $\mt{sql\_subset\_all}$ is a convenience for keeping all fields of a record.
-
$$\begin{array}{l}
\mt{con} \; \mt{sql\_subset} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \mt{Type} \\
\mt{val} \; \mt{sql\_subset} : \mt{keep\_drop} :: \{(\{\mt{Type}\} \times \{\mt{Type}\})\} \\
@@ -1032,13 +1025,11 @@ $$\begin{array}{l}
\end{array}$$
SQL expressions are used in several places, including $\mt{SELECT}$, $\mt{WHERE}$, $\mt{HAVING}$, and $\mt{ORDER} \; \mt{BY}$ clauses. They reify a fragment of the standard SQL expression language, while making it possible to inject ``native'' Ur values in some places. The arguments to the $\mt{sql\_exp}$ type family respectively give the unrestricted-availability table fields, the table fields that may only be used in arguments to aggregate functions, the available selected expressions, and the type of the expression.
-
$$\begin{array}{l}
\mt{con} \; \mt{sql\_exp} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \to \mt{Type}
\end{array}$$
Any field in scope may be converted to an expression.
-
$$\begin{array}{l}
\mt{val} \; \mt{sql\_field} : \mt{otherTabs} ::: \{\{\mt{Type}\}\} \to \mt{otherFields} ::: \{\mt{Type}\} \\
\hspace{.1in} \to \mt{fieldType} ::: \mt{Type} \to \mt{agg} ::: \{\{\mt{Type}\}\} \\
@@ -1047,4 +1038,117 @@ $$\begin{array}{l}
\hspace{.1in} \to \mt{sql\_exp} \; ([\mt{tab} = [\mt{field} = \mt{fieldType}] \rc \mt{otherFields}] \rc \mt{otherTabs}) \; \mt{agg} \; \mt{exps} \; \mt{fieldType}
\end{array}$$
+There is an analogous function for referencing named expressions.
+$$\begin{array}{l}
+ \mt{val} \; \mt{sql\_exp} : \mt{tabs} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{t} ::: \mt{Type} \to \mt{rest} ::: \{\mt{Type}\} \to \mt{nm} :: \mt{Name} \\
+ \hspace{.1in} \to \mt{sql\_exp} \; \mt{tabs} \; \mt{agg} \; ([\mt{nm} = \mt{t}] \rc \mt{rest}) \; \mt{t}
+\end{array}$$
+
+Ur values of appropriate types may be injected into SQL expressions.
+$$\begin{array}{l}
+ \mt{class} \; \mt{sql\_injectable} \\
+ \mt{val} \; \mt{sql\_bool} : \mt{sql\_injectable} \; \mt{bool} \\
+ \mt{val} \; \mt{sql\_int} : \mt{sql\_injectable} \; \mt{int} \\
+ \mt{val} \; \mt{sql\_float} : \mt{sql\_injectable} \; \mt{float} \\
+ \mt{val} \; \mt{sql\_string} : \mt{sql\_injectable} \; \mt{string} \\
+ \mt{val} \; \mt{sql\_time} : \mt{sql\_injectable} \; \mt{time} \\
+ \mt{val} \; \mt{sql\_option\_bool} : \mt{sql\_injectable} \; (\mt{option} \; \mt{bool}) \\
+ \mt{val} \; \mt{sql\_option\_int} : \mt{sql\_injectable} \; (\mt{option} \; \mt{int}) \\
+ \mt{val} \; \mt{sql\_option\_float} : \mt{sql\_injectable} \; (\mt{option} \; \mt{float}) \\
+ \mt{val} \; \mt{sql\_option\_string} : \mt{sql\_injectable} \; (\mt{option} \; \mt{string}) \\
+ \mt{val} \; \mt{sql\_option\_time} : \mt{sql\_injectable} \; (\mt{option} \; \mt{time}) \\
+ \mt{val} \; \mt{sql\_inject} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \to \mt{sql\_injectable} \; \mt{t} \\
+ \hspace{.1in} \to \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t}
+\end{array}$$
+
+We have the SQL nullness test, which is necessary because of the strange SQL semantics of equality in the presence of null values.
+$$\begin{array}{l}
+ \mt{val} \; \mt{sql\_is\_null} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; (\mt{option} \; \mt{t}) \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{bool}
+\end{array}$$
+
+We have generic nullary, unary, and binary operators, as well as comparison operators.
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_nfunc} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_current\_timestamp} : \mt{sql\_nfunc} \; \mt{time} \\
+ \mt{val} \; \mt{sql\_nfunc} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_nfunc} \; \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t} \end{array}$$
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_unary} :: \mt{Type} \to \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_not} : \mt{sql\_unary} \; \mt{bool} \; \mt{bool} \\
+ \mt{val} \; \mt{sql\_unary} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{arg} ::: \mt{Type} \to \mt{res} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_unary} \; \mt{arg} \; \mt{res} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{arg} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{res}
+\end{array}$$
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_binary} :: \mt{Type} \to \mt{Type} \to \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_and} : \mt{sql\_binary} \; \mt{bool} \; \mt{bool} \; \mt{bool} \\
+ \mt{val} \; \mt{sql\_or} : \mt{sql\_binary} \; \mt{bool} \; \mt{bool} \; \mt{bool} \\
+ \mt{val} \; \mt{sql\_binary} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{arg_1} ::: \mt{Type} \to \mt{arg_2} ::: \mt{Type} \to \mt{res} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_binary} \; \mt{arg_1} \; \mt{arg_2} \; \mt{res} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{arg_1} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{arg_2} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{res}
+\end{array}$$
+
+$$\begin{array}{l}
+ \mt{type} \; \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_eq} : \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_ne} : \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_lt} : \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_le} : \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_gt} : \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_ge} : \mt{sql\_comparison} \\
+ \mt{val} \; \mt{sql\_comparison} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_comparison} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{bool}
+\end{array}$$
+
+Finally, we have aggregate functions. The $\mt{COUNT(\ast)}$ syntax is handled specially, since it takes no real argument. The other aggregate functions are placed into a general type family, using type classes to restrict usage to properly-typed arguments. The key aspect of the $\mt{sql\_aggregate}$ function's type is the shift of aggregate-function-only fields into unrestricted fields.
+
+$$\begin{array}{l}
+ \mt{val} \; \mt{sql\_count} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{int}
+\end{array}$$
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_aggregate} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_aggregate} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_aggregate} \; \mt{t} \to \mt{sql\_exp} \; \mt{agg} \; \mt{agg} \; \mt{exps} \; \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t}
+\end{array}$$
+
+$$\begin{array}{l}
+ \mt{class} \; \mt{sql\_summable} \\
+ \mt{val} \; \mt{sql\_summable\_int} : \mt{sql\_summable} \; \mt{int} \\
+ \mt{val} \; \mt{sql\_summable\_float} : \mt{sql\_summable} \; \mt{float} \\
+ \mt{val} \; \mt{sql\_avg} : \mt{t} ::: \mt{Type} \to \mt{sql\_summable} \; \mt{t} \to \mt{sql\_aggregate} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_sum} : \mt{t} ::: \mt{Type} \to \mt{sql\_summable} \; \mt{t} \to \mt{sql\_aggregate} \; \mt{t}
+\end{array}$$
+
+$$\begin{array}{l}
+ \mt{class} \; \mt{sql\_maxable} \\
+ \mt{val} \; \mt{sql\_maxable\_int} : \mt{sql\_maxable} \; \mt{int} \\
+ \mt{val} \; \mt{sql\_maxable\_float} : \mt{sql\_maxable} \; \mt{float} \\
+ \mt{val} \; \mt{sql\_maxable\_string} : \mt{sql\_maxable} \; \mt{string} \\
+ \mt{val} \; \mt{sql\_maxable\_time} : \mt{sql\_maxable} \; \mt{time} \\
+ \mt{val} \; \mt{sql\_max} : \mt{t} ::: \mt{Type} \to \mt{sql\_maxable} \; \mt{t} \to \mt{sql\_aggregate} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_min} : \mt{t} ::: \mt{Type} \to \mt{sql\_maxable} \; \mt{t} \to \mt{sql\_aggregate} \; \mt{t}
+\end{array}$$
+
+We wrap up the definition of query syntax with the types used in representing $\mt{ORDER} \; \mt{BY}$, $\mt{LIMIT}$, and $\mt{OFFSET}$ clauses.
+$$\begin{array}{l}
+ \mt{type} \; \mt{sql\_direction} \\
+ \mt{val} \; \mt{sql\_asc} : \mt{sql\_direction} \\
+ \mt{val} \; \mt{sql\_desc} : \mt{sql\_direction} \\
+ \\
+ \mt{con} \; \mt{sql\_order\_by} :: \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_order\_by\_Nil} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{exps} :: \{\mt{Type}\} \to \mt{sql\_order\_by} \; \mt{tables} \; \mt{exps} \\
+ \mt{val} \; \mt{sql\_order\_by\_Cons} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{sql\_exp} \; \mt{tables} \; [] \; \mt{exps} \; \mt{t} \to \mt{sql\_direction} \to \mt{sql\_order\_by} \; \mt{tables} \; \mt{exps} \to \mt{sql\_order\_by} \; \mt{tables} \; \mt{exps} \\
+ \\
+ \mt{type} \; \mt{sql\_limit} \\
+ \mt{val} \; \mt{sql\_no\_limit} : \mt{sql\_limit} \\
+ \mt{val} \; \mt{sql\_limit} : \mt{int} \to \mt{sql\_limit} \\
+ \\
+ \mt{type} \; \mt{sql\_offset} \\
+ \mt{val} \; \mt{sql\_no\_offset} : \mt{sql\_offset} \\
+ \mt{val} \; \mt{sql\_offset} : \mt{int} \to \mt{sql\_offset}
+\end{array}$$
+
\end{document}
\ No newline at end of file
diff --git a/lib/basis.urs b/lib/basis.urs
index 656c5b91..9681328f 100644
--- a/lib/basis.urs
+++ b/lib/basis.urs
@@ -232,7 +232,7 @@ val sql_comparison : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
-> sql_exp tables agg exps bool
val sql_count : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
- -> unit -> sql_exp tables agg exps int
+ -> sql_exp tables agg exps int
con sql_aggregate :: Type -> Type
val sql_aggregate : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
diff --git a/src/monoize.sml b/src/monoize.sml
index 28ea5946..cd20e366 100644
--- a/src/monoize.sml
+++ b/src/monoize.sml
@@ -1530,8 +1530,7 @@ fun monoExp (env, st, fm) (all as (e, loc)) =
(L.EFfi ("Basis", "sql_count"), _),
_), _),
_), _),
- _) => ((L'.EAbs ("_", (L'.TRecord [], loc), (L'.TFfi ("Basis", "string"), loc),
- (L'.EPrim (Prim.String "COUNT(*)"), loc)), loc),
+ _) => ((L'.EPrim (Prim.String "COUNT(*)"), loc),
fm)
| L.ECApp (
diff --git a/src/urweb.grm b/src/urweb.grm
index 8a3bee7f..3d77905e 100644
--- a/src/urweb.grm
+++ b/src/urweb.grm
@@ -1267,8 +1267,7 @@ sqlexp : TRUE (sql_inject (EVar (["Basis"], "True", In
| COUNT LPAREN STAR RPAREN (let
val loc = s (COUNTleft, RPARENright)
in
- (EApp ((EVar (["Basis"], "sql_count", Infer), loc),
- (ERecord [], loc)), loc)
+ (EVar (["Basis"], "sql_count", Infer), loc)
end)
| sqlagg LPAREN sqlexp RPAREN (let
val loc = s (sqlaggleft, RPARENright)
--
cgit v1.2.3
From d86935ec25586bbba5b6aaf60fb93d20e99de964 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 10:24:23 -0500
Subject: DML
---
doc/manual.tex | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index fb6b3b01..83ce8867 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -981,6 +981,14 @@ $$\begin{array}{l}
\hspace{.1in} \to \mt{sql\_query} \; \mt{selectedFields} \; \mt{selectedExps}
\end{array}$$
+Queries are used by folding over their results inside transactions.
+$$\begin{array}{l}
+ \mt{val} \; \mt{query} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \lambda [\mt{tables} \sim \mt{exps}] \Rightarrow \mt{state} ::: \mt{Type} \to \mt{sql\_query} \; \mt{tables} \; \mt{exps} \\
+ \hspace{.1in} \to (\$(\mt{exps} \rc \mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: \{\mt{Type}\}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \$\mt{fields}] \rc \mt{acc}) \; [] \; \mt{tables}) \\
+ \hspace{.2in} \to \mt{state} \to \mt{transaction} \; \mt{state}) \\
+ \hspace{.1in} \to \mt{state} \to \mt{transaction} \; \mt{state}
+\end{array}$$
+
Most of the complexity of the query encoding is in the type $\mt{sql\_query1}$, which includes simple queries and derived queries based on relational operators. Constructor arguments respectively specify the tables we select from, the subset of fields that we keep from each table for the result rows, and the extra expressions that we select.
$$\begin{array}{l}
\mt{con} \; \mt{sql\_query1} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \\
@@ -1151,4 +1159,32 @@ $$\begin{array}{l}
\mt{val} \; \mt{sql\_offset} : \mt{int} \to \mt{sql\_offset}
\end{array}$$
+
+\subsubsection{DML}
+
+The Ur/Web library also includes an embedding of a fragment of SQL's DML, the Data Manipulation Language, for modifying database tables. Any piece of DML may be executed in a transaction.
+
+$$\begin{array}{l}
+ \mt{type} \; \mt{dml} \\
+ \mt{val} \; \mt{dml} : \mt{dml} \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
+Properly-typed records may be used to form $\mt{INSERT}$ commands.
+$$\begin{array}{l}
+ \mt{val} \; \mt{insert} : \mt{fields} ::: \{\mt{Type}\} \to \mt{sql\_table} \; \mt{fields} \\
+ \hspace{.1in} \to \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{t} :: \mt{Type}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_exp} \; [] \; [] \; [] \; \mt{t}] \rc \mt{acc}) \; [] \; \mt{fields}) \to \mt{dml}
+\end{array}$$
+
+An $\mt{UPDATE}$ command is formed from a choice of which table fields to leave alone and which to change, along with an expression to use to compute the new value of each changed field and a $\mt{WHERE}$ clause.
+$$\begin{array}{l}
+ \mt{val} \; \mt{update} : \mt{unchanged} ::: \{\mt{Type}\} \to \mt{changed} :: \{\mt{Type}\} \to \lambda [\mt{changed} \sim \mt{unchanged}] \\
+ \hspace{.1in} \Rightarrow \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{t} :: \mt{Type}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_exp} \; [\mt{T} = \mt{changed} \rc \mt{unchanged}] \; [] \; [] \; \mt{t}] \rc \mt{acc}) \; [] \; \mt{changed}) \\
+ \hspace{.1in} \to \mt{sql\_table} \; (\mt{changed} \rc \mt{unchanged}) \to \mt{sql\_exp} \; [\mt{T} = \mt{changed} \rc \mt{unchanged}] \; [] \; [] \; \mt{bool} \to \mt{dml}
+\end{array}$$
+
+A $\mt{DELETE}$ command is formed from a table and a $\mt{WHERE}$ clause.
+$$\begin{array}{l}
+ \mt{val} \; \mt{delete} : \mt{fields} ::: \{\mt{Type}\} \to \mt{sql\_table} \; \mt{fields} \to \mt{sql\_exp} \; [\mt{T} = \mt{fields}] \; [] \; [] \; \mt{bool} \to \mt{dml}
+\end{array}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 41c63800f3c6f330002b29b133836f6e4f7a81d3 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 10:25:55 -0500
Subject: Sequences
---
doc/manual.tex | 10 ++++++++++
1 file changed, 10 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 83ce8867..95d2d548 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1187,4 +1187,14 @@ $$\begin{array}{l}
\mt{val} \; \mt{delete} : \mt{fields} ::: \{\mt{Type}\} \to \mt{sql\_table} \; \mt{fields} \to \mt{sql\_exp} \; [\mt{T} = \mt{fields}] \; [] \; [] \; \mt{bool} \to \mt{dml}
\end{array}$$
+\subsubsection{Sequences}
+
+SQL sequences are counters with concurrency control, often used to assign unique IDs. Ur/Web supports them via a simple interface. The only way to create a sequence is with the $\mt{sequence}$ declaration form.
+
+$$\begin{array}{l}
+ \mt{type} \; \mt{sql\_sequence} \\
+ \mt{val} \; \mt{nextval} : \mt{sql\_sequence} \to \mt{transaction} \; \mt{int}
+\end{array}$$
+
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From fe138022197bc0dede592fc1df97e1ef540c1b6a Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 10:59:14 -0500
Subject: XML
---
doc/manual.tex | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 95d2d548..0dc33a4d 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1197,4 +1197,52 @@ $$\begin{array}{l}
\end{array}$$
+\subsection{XML}
+
+Ur/Web's library contains an encoding of XML syntax and semantic constraints. We make no effort to follow the standards governing XML schemas. Rather, XML fragments are viewed more as values of ML datatypes, and we only track which tags are allowed inside which other tags.
+
+The basic XML type family has arguments respectively indicating the \emph{context} of a fragment, the fields that the fragment expects to be bound on entry (and their types), and the fields that the fragment will bind (and their types). Contexts are a record-based ``poor man's subtyping'' encoding, with each possible set of valid tags corresponding to a different context record. The arguments dealing with field binding are only relevant to HTML forms.
+$$\begin{array}{l}
+ \mt{con} \; \mt{xml} :: \{\mt{Unit}\} \to \{\mt{Type}\} \to \{\mt{Type}\} \to \mt{Type}
+\end{array}$$
+
+We also have a type family of XML tags, indexed respectively by the record of optional attributes accepted by the tag, the context in which the tag may be placed, the context required of children of the tag, which form fields the tag uses, and which fields the tag defines.
+$$\begin{array}{l}
+ \mt{con} \; \mt{tag} :: \{\mt{Type}\} \to \{\mt{Unit}\} \to \{\mt{Unit}\} \to \{\mt{Type}\} \to \{\mt{Type}\} \to \mt{Type}
+\end{array}$$
+
+Literal text may be injected into XML as ``CDATA.''
+$$\begin{array}{l}
+ \mt{val} \; \mt{cdata} : \mt{ctx} ::: \{\mt{Unit}\} \to \mt{use} ::: \{\mt{Type}\} \to \mt{string} \to \mt{xml} \; \mt{ctx} \; \mt{use} \; []
+\end{array}$$
+
+There is a function for producing an XML tree with a particular tag at its root.
+$$\begin{array}{l}
+ \mt{val} \; \mt{tag} : \mt{attrsGiven} ::: \{\mt{Type}\} \to \mt{attrsAbsent} ::: \{\mt{Type}\} \to \mt{ctxOuter} ::: \{\mt{Unit}\} \to \mt{ctxInner} ::: \{\mt{Unit}\} \\
+ \hspace{.1in} \to \mt{useOuter} ::: \{\mt{Type}\} \to \mt{useInner} ::: \{\mt{Type}\} \to \mt{bindOuter} ::: \{\mt{Type}\} \to \mt{bindInner} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \lambda [\mt{attrsGiven} \sim \mt{attrsAbsent}] \; [\mt{useOuter} \sim \mt{useInner}] \; [\mt{bindOuter} \sim \mt{bindInner}] \Rightarrow \$\mt{attrsGiven} \\
+ \hspace{.1in} \to \mt{tag} \; (\mt{attrsGiven} \rc \mt{attrsAbsent}) \; \mt{ctxOuter} \; \mt{ctxInner} \; \mt{useOuter} \; \mt{bindOuter} \\
+ \hspace{.1in} \to \mt{xml} \; \mt{ctxInner} \; \mt{useInner} \; \mt{bindInner} \to \mt{xml} \; \mt{ctxOuter} \; (\mt{useOuter} \rc \mt{useInner}) \; (\mt{bindOuter} \rc \mt{bindInner})
+\end{array}$$
+
+Two XML fragments may be concatenated.
+$$\begin{array}{l}
+ \mt{val} \; \mt{join} : \mt{ctx} ::: \{\mt{Unit}\} \to \mt{use_1} ::: \{\mt{Type}\} \to \mt{bind_1} ::: \{\mt{Type}\} \to \mt{bind_2} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \lambda [\mt{use_1} \sim \mt{bind_1}] \; [\mt{bind_1} \sim \mt{bind_2}] \\
+ \hspace{.1in} \Rightarrow \mt{xml} \; \mt{ctx} \; \mt{use_1} \; \mt{bind_1} \to \mt{xml} \; \mt{ctx} \; (\mt{use_1} \rc \mt{bind_1}) \; \mt{bind_2} \to \mt{xml} \; \mt{ctx} \; \mt{use_1} \; (\mt{bind_1} \rc \mt{bind_2})
+\end{array}$$
+
+Finally, any XML fragment may be updated to ``claim'' to use more form fields than it does.
+$$\begin{array}{l}
+ \mt{val} \; \mt{useMore} : \mt{ctx} ::: \{\mt{Unit}\} \to \mt{use_1} ::: \{\mt{Type}\} \to \mt{use_2} ::: \{\mt{Type}\} \to \mt{bind} ::: \{\mt{Type}\} \to \lambda [\mt{use_1} \sim \mt{use_2}] \\
+ \hspace{.1in} \Rightarrow \mt{xml} \; \mt{ctx} \; \mt{use_1} \; \mt{bind} \to \mt{xml} \; \mt{ctx} \; (\mt{use_1} \rc \mt{use_2}) \; \mt{bind}
+\end{array}$$
+
+We will not list here the different HTML tags and related functions from the standard library. They should be easy enough to understand from the code in \texttt{basis.urs}. The set of tags in the library is not yet claimed to be complete for HTML standards.
+
+One last useful function is for aborting any page generation, returning some XML as an error message. This function takes the place of some uses of a general exception mechanism.
+$$\begin{array}{l}
+ \mt{val} \; \mt{error} : \mt{t} ::: \mt{Type} \to \mt{xml} \; [\mt{Body}] \; [] \; [] \to \mt{t}
+\end{array}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From b45bf9b187a61b2a803555025e1d6496144a9759 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 12:02:54 -0500
Subject: Query syntax
---
doc/manual.tex | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 0dc33a4d..79cda554 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1245,4 +1245,58 @@ $$\begin{array}{l}
\mt{val} \; \mt{error} : \mt{t} ::: \mt{Type} \to \mt{xml} \; [\mt{Body}] \; [] \; [] \to \mt{t}
\end{array}$$
+
+\section{Ur/Web Syntax Extensions}
+
+Ur/Web features some syntactic shorthands for building values using the functions from the last section. This section sketches the grammar of those extensions. We write spans of syntax inside brackets to indicate that they are optional.
+
+\subsection{SQL}
+
+\subsubsection{Queries}
+
+$$\begin{array}{rrcll}
+ \textrm{Queries} & Q &::=& (q \; [\mt{ORDER} \; \mt{BY} \; (E \; [D],)^+] \; [\mt{LIMIT} \; N] \; [\mt{OFFSET} \; N]) \\
+ \textrm{Pre-queries} & q &::=& \mt{SELECT} \; P \; \mt{FROM} \; T,^+ \; [\mt{WHERE} \; E] \; [\mt{GROUP} \; \mt{BY} \; p,^+] \; [\mt{HAVING} \; E] \\
+ &&& \mid q \; R \; q \\
+ \textrm{Relational operators} & R &::=& \mt{UNION} \mid \mt{INTERSECT} \mid \mt{EXCEPT}
+\end{array}$$
+
+$$\begin{array}{rrcll}
+ \textrm{Projections} & P &::=& \ast & \textrm{all columns} \\
+ &&& p,^+ & \textrm{particular columns} \\
+ \textrm{Pre-projections} & p &::=& t.f & \textrm{one column from a table} \\
 +    &&& t.\{\{c\}\} & \textrm{a record of columns from a table (of kind $\{\mt{Type}\}$)} \\
+ \textrm{Table names} & t &::=& x & \textrm{constant table name (automatically capitalized)} \\
+ &&& X & \textrm{constant table name} \\
+ &&& \{\{c\}\} & \textrm{computed table name (of kind $\mt{Name}$)} \\
+ \textrm{Column names} & f &::=& X & \textrm{constant column name} \\
+ &&& \{c\} & \textrm{computed column name (of kind $\mt{Name}$)} \\
+ \textrm{Tables} & T &::=& x & \textrm{table variable, named locally by its own capitalization} \\
+ &&& x \; \mt{AS} \; t & \textrm{table variable, with local name} \\
+ &&& \{\{e\}\} \; \mt{AS} \; t & \textrm{computed table expression, with local name} \\
+ \textrm{SQL expressions} & E &::=& p & \textrm{column references} \\
+ &&& X & \textrm{named expression references} \\
+ &&& \{\{e\}\} & \textrm{injected native Ur expressions} \\
+ &&& \{e\} & \textrm{computed expressions, probably using $\mt{sql\_exp}$ directly} \\
+ &&& \mt{TRUE} \mid \mt{FALSE} & \textrm{boolean constants} \\
+ &&& \ell & \textrm{primitive type literals} \\
+ &&& \mt{NULL} & \textrm{null value (injection of $\mt{None}$)} \\
+ &&& E \; \mt{IS} \; \mt{NULL} & \textrm{nullness test} \\
+ &&& n & \textrm{nullary operators} \\
+ &&& u \; E & \textrm{unary operators} \\
+ &&& E \; b \; E & \textrm{binary operators} \\
+ &&& \mt{COUNT}(\ast) & \textrm{count number of rows} \\
+ &&& a(E) & \textrm{other aggregate function} \\
+ &&& (E) & \textrm{explicit precedence} \\
+ \textrm{Nullary operators} & n &::=& \mt{CURRENT\_TIMESTAMP} \\
+ \textrm{Unary operators} & u &::=& \mt{NOT} \\
+ \textrm{Binary operators} & b &::=& \mt{AND} \mid \mt{OR} \mid \neq \mid < \mid \leq \mid > \mid \geq \\
+ \textrm{Aggregate functions} & a &::=& \mt{AVG} \mid \mt{SUM} \mid \mt{MIN} \mid \mt{MAX} \\
+ \textrm{Directions} & D &::=& \mt{ASC} \mid \mt{DESC} \\
+ \textrm{SQL integer} & N &::=& n \mid \{e\} \\
+\end{array}$$
+
+Additionally, an SQL expression may be inserted into normal Ur code with the syntax $(\mt{SQL} \; E)$ or $(\mt{WHERE} \; E)$.
+
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 4532add1a287aa8922ba5d0d556db3cd04e42420 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 12:10:51 -0500
Subject: DML
---
doc/manual.tex | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 79cda554..4915edfb 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1254,8 +1254,10 @@ Ur/Web features some syntactic shorthands for building values using the function
\subsubsection{Queries}
+Queries $Q$ are added to the rules for expressions $e$.
+
$$\begin{array}{rrcll}
- \textrm{Queries} & Q &::=& (q \; [\mt{ORDER} \; \mt{BY} \; (E \; [D],)^+] \; [\mt{LIMIT} \; N] \; [\mt{OFFSET} \; N]) \\
+ \textrm{Queries} & Q &::=& (q \; [\mt{ORDER} \; \mt{BY} \; (E \; [o],)^+] \; [\mt{LIMIT} \; N] \; [\mt{OFFSET} \; N]) \\
\textrm{Pre-queries} & q &::=& \mt{SELECT} \; P \; \mt{FROM} \; T,^+ \; [\mt{WHERE} \; E] \; [\mt{GROUP} \; \mt{BY} \; p,^+] \; [\mt{HAVING} \; E] \\
&&& \mid q \; R \; q \\
\textrm{Relational operators} & R &::=& \mt{UNION} \mid \mt{INTERSECT} \mid \mt{EXCEPT}
@@ -1292,11 +1294,23 @@ $$\begin{array}{rrcll}
\textrm{Unary operators} & u &::=& \mt{NOT} \\
\textrm{Binary operators} & b &::=& \mt{AND} \mid \mt{OR} \mid \neq \mid < \mid \leq \mid > \mid \geq \\
\textrm{Aggregate functions} & a &::=& \mt{AVG} \mid \mt{SUM} \mid \mt{MIN} \mid \mt{MAX} \\
- \textrm{Directions} & D &::=& \mt{ASC} \mid \mt{DESC} \\
+ \textrm{Directions} & o &::=& \mt{ASC} \mid \mt{DESC} \\
\textrm{SQL integer} & N &::=& n \mid \{e\} \\
\end{array}$$
Additionally, an SQL expression may be inserted into normal Ur code with the syntax $(\mt{SQL} \; E)$ or $(\mt{WHERE} \; E)$.
+\subsubsection{DML}
+
+DML commands $D$ are added to the rules for expressions $e$.
+
+$$\begin{array}{rrcll}
+ \textrm{Commands} & D &::=& (\mt{INSERT} \; \mt{INTO} \; T^E \; (f,^+) \; \mt{VALUES} \; (E,^+)) \\
+ &&& (\mt{UPDATE} \; T^E \; \mt{SET} \; (f = E,)^+ \; \mt{WHERE} \; E) \\
+ &&& (\mt{DELETE} \; \mt{FROM} \; T^E \; \mt{WHERE} \; E) \\
+ \textrm{Table expressions} & T^E &::=& x \mid \{\{e\}\}
+\end{array}$$
+
+Inside $\mt{UPDATE}$ and $\mt{DELETE}$ commands, lone variables $X$ are interpreted as references to columns of the implicit table $\mt{T}$, rather than to named expressions.
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From 13eec52acec99c062f98a80b38c590ad7adfd8b9 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 12:21:47 -0500
Subject: XML syntax
---
doc/manual.tex | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 4915edfb..b52146de 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1313,4 +1313,20 @@ $$\begin{array}{rrcll}
Inside $\mt{UPDATE}$ and $\mt{DELETE}$ commands, lone variables $X$ are interpreted as references to columns of the implicit table $\mt{T}$, rather than to named expressions.
+\subsection{XML}
+
+XML fragments $L$ are added to the rules for expressions $e$.
+
+$$\begin{array}{rrcll}
 +  \textrm{XML fragments} & L &::=& \texttt{<xml/>} \mid \texttt{<xml>}l^*\texttt{</xml>} \\
+ \textrm{XML pieces} & l &::=& \textrm{text} & \textrm{cdata} \\
+ &&& \texttt{<}g\texttt{/>} & \textrm{tag with no children} \\
 +    &&& \texttt{<}g\texttt{>}l^*\texttt{</}x\texttt{>} & \textrm{tag with children} \\
+ \textrm{Tag} & g &::=& h \; (x = v)^* \\
+ \textrm{Tag head} & h &::=& x & \textrm{tag name} \\
+ &&& h\{c\} & \textrm{constructor parameter} \\
+ \textrm{Attribute value} & v &::=& \ell & \textrm{literal value} \\
+ &&& \{e\} & \textrm{computed value} \\
+\end{array}$$
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From da893776fddf8136a4b8ae6cfcb536e0fe6863ca Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 14:50:03 -0500
Subject: Compiler phases
---
doc/manual.tex | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 98 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index b52146de..12939a56 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -47,7 +47,7 @@ We write $\ell$ for literals of the primitive types, for the most part following
This version of the manual doesn't include operator precedences; see \texttt{src/urweb.grm} for that.
-\subsection{Core Syntax}
+\subsection{\label{core}Core Syntax}
\emph{Kinds} classify types and other compile-time-only entities. Each kind in the grammar is listed with a description of the sort of data it classifies.
$$\begin{array}{rrcll}
@@ -1329,4 +1329,101 @@ $$\begin{array}{rrcll}
&&& \{e\} & \textrm{computed value} \\
\end{array}$$
+
+\section{Compiler Phases}
+
+The Ur/Web compiler is unconventional in that it relies on a kind of \emph{heuristic compilation}. Not all valid programs will compile successfully. Informally, programs fail to compile when they are ``too higher order.'' Compiler phases do their best to eliminate different kinds of higher order-ness, but some programs just won't compile. This is a trade-off for producing very efficient executables. Compiled Ur/Web programs use native C representations and require no garbage collection.
+
+In this section, we step through the main phases of compilation, noting what consequences each phase has for effective programming.
+
+\subsection{Parse}
+
+The compiler reads a \texttt{.urp} file, figures out which \texttt{.urs} and \texttt{.ur} files it references, and combines them all into what is conceptually a single sequence of declarations in the core language of Section \ref{core}.
+
+\subsection{Elaborate}
+
+This is where type inference takes place, translating programs into an explicit form with no more wildcards. This phase is the most likely source of compiler error messages.
+
+\subsection{Unnest}
+
+Named local function definitions are moved to the top level, to avoid the need to generate closures.
+
+\subsection{Corify}
+
+Module system features are compiled away, through inlining of functor definitions at application sites. Afterward, most abstraction boundaries are broken, facilitating optimization.
+
+\subsection{Especialize}
+
+Functions are specialized to particular argument patterns. This is an important trick for avoiding the need to maintain any closures at runtime.
+
+\subsection{Untangle}
+
+Remove unnecessary mutual recursion, splitting recursive groups into strongly-connected components.
+
+\subsection{Shake}
+
+Remove all definitions not needed to run the page handlers that are visible in the signature of the last module listed in the \texttt{.urp} file.
+
+\subsection{Tag}
+
+Assign a URL name to each link and form action. It is important that these links and actions are written as applications of named functions, because such names are used to generate URL patterns. A URL pattern has a name built from the full module path of the named function, followed by the function name, with all pieces separated by slashes. The path of a functor application is based on the name given to the result, rather than the path of the functor itself.
+
+\subsection{Reduce}
+
+Apply definitional equality rules to simplify the program as much as possible. This effectively includes inlining of every non-recursive definition.
+
+\subsection{Unpoly}
+
+This phase specializes polymorphic functions to the specific arguments passed to them in the program. If the program contains real polymorphic recursion, Unpoly will be insufficient to avoid later error messages about too much polymorphism.
+
+\subsection{Specialize}
+
+Replace uses of parametrized datatypes with versions specialized to specific parameters. As for Unpoly, this phase will not be effective enough in the presence of polymorphic recursion or other fancy uses of impredicative polymorphism.
+
+\subsection{Shake}
+
+Here the compiler repeats the earlier shake phase.
+
+\subsection{Monoize}
+
+Programs are translated to a new intermediate language without polymorphism or non-$\mt{Type}$ constructors. Error messages may pop up here if earlier phases failed to remove such features.
+
+This is the stage at which concrete names are generated for cookies, tables, and sequences. They are named following the same convention as for links and actions, based on module path information saved from earlier stages. Table and sequence names separate path elements with underscores instead of slashes, and they are prefixed by \texttt{uw\_}.
+\subsection{MonoOpt}
+
+Simple algebraic laws are applied to simplify the program, focusing especially on efficient imperative generation of HTML pages.
+
+\subsection{MonoUntangle}
+
+Unnecessary mutual recursion is broken up again.
+
+\subsection{MonoReduce}
+
+Equivalents of the definitional equality rules are applied to simplify programs, with inlining again playing a major role.
+
+\subsection{MonoShake, MonoOpt}
+
+Unneeded declarations are removed, and basic optimizations are repeated.
+
+\subsection{Fuse}
+
+The compiler tries to simplify calls to recursive functions whose results are immediately written as page output. The write action is pushed inside the function definitions to avoid allocation of intermediate results.
+
+\subsection{MonoUntangle, MonoShake}
+
+Fuse often creates more opportunities to remove spurious mutual recursion.
+
+\subsection{Pathcheck}
+
+The compiler checks that no link or action name has been used more than once.
+
+\subsection{Cjrize}
+
+The program is translated to what is more or less a subset of C. If any use of functions as data remains at this point, the compiler will complain.
+
+\subsection{C Compilation and Linking}
+
+The output of the last phase is pretty-printed as C source code and passed to GCC.
+
+
\end{document}
\ No newline at end of file
--
cgit v1.2.3
From ca1b68736e14dff52c08e76a7a6dfa855d1884f9 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 15:01:21 -0500
Subject: The structure of web applications
---
doc/manual.tex | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 12939a56..46404f7c 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1330,6 +1330,17 @@ $$\begin{array}{rrcll}
\end{array}$$
+\section{The Structure of Web Applications}
+
 +A web application is built from a series of modules, with one module, the last one appearing in the \texttt{.urp} file, designated as the main module.  The signature of the main module determines the URL entry points to the application.  Such an entry point should have type $\mt{unit} \to \mt{transaction} \; \mt{page}$, where $\mt{page}$ is a type synonym for top-level HTML pages, defined in $\mt{Basis}$.  If such a function, with name $f$, is at the top level of main module $M$, it will be accessible at URI \texttt{/M/f}, and so on for more deeply-nested functions, as described in Section \ref{tag} below.
+
+When the standalone web server receives a request for a known page, it calls the function for that page, ``running'' the resulting transaction to produce the page to return to the client. Pages link to other pages with the \texttt{link} attribute of the \texttt{a} HTML tag. A link has type $\mt{transaction} \; \mt{page}$, and the semantics of a link are that this transaction should be run to compute the result page, when the link is followed. Link targets are assigned URL names in the same way as top-level entry points.
+
+HTML forms are handled in a similar way. The $\mt{action}$ attribute of a $\mt{submit}$ form tag takes a value of type $\$\mt{use} \to \mt{transaction} \; \mt{page}$, where $\mt{use}$ is a kind-$\{\mt{Type}\}$ record of the form fields used by this action handler. Action handlers are assigned URL patterns in the same way as above.
+
+For both links and actions, direct arguments and local variables mentioned implicitly via closures are automatically included in serialized form in URLs, in the order in which they appeared in the source code.
+
+
\section{Compiler Phases}
The Ur/Web compiler is unconventional in that it relies on a kind of \emph{heuristic compilation}. Not all valid programs will compile successfully. Informally, programs fail to compile when they are ``too higher order.'' Compiler phases do their best to eliminate different kinds of higher order-ness, but some programs just won't compile. This is a trade-off for producing very efficient executables. Compiled Ur/Web programs use native C representations and require no garbage collection.
@@ -1364,7 +1375,7 @@ Remove unnecessary mutual recursion, splitting recursive groups into strongly-co
Remove all definitions not needed to run the page handlers that are visible in the signature of the last module listed in the \texttt{.urp} file.
-\subsection{Tag}
+\subsection{\label{tag}Tag}
Assign a URL name to each link and form action. It is important that these links and actions are written as applications of named functions, because such names are used to generate URL patterns. A URL pattern has a name built from the full module path of the named function, followed by the function name, with all pieces separated by slashes. The path of a functor application is based on the name given to the result, rather than the path of the functor itself.
--
cgit v1.2.3
From a317e5050fe88a8672a5da5faa2d7180ab285a0d Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 7 Dec 2008 15:10:59 -0500
Subject: Intro
---
doc/manual.tex | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 46404f7c..8d507792 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1,5 +1,5 @@
\documentclass{article}
-\usepackage{fullpage,amsmath,amssymb,proof}
+\usepackage{fullpage,amsmath,amssymb,proof,url}
\newcommand{\cd}[1]{\texttt{#1}}
\newcommand{\mt}[1]{\mathsf{#1}}
@@ -17,6 +17,33 @@
\tableofcontents
+
+\section{Introduction}
+
+\emph{Ur} is a programming language designed to introduce richer type system features into functional programming in the tradition of ML and Haskell. Ur is functional, pure, statically-typed, and strict. Ur supports a powerful kind of \emph{metaprogramming} based on \emph{row types}.
+
+\emph{Ur/Web} is Ur plus a special standard library and associated rules for parsing and optimization. Ur/Web supports construction of dynamic web applications backed by SQL databases. The signature of the standard library is such that well-typed Ur/Web programs ``don't go wrong'' in a very broad sense. Not only do they not crash during particular page generations, but they also may not:
+
+\begin{itemize}
+\item Suffer from any kinds of code-injection attacks
+\item Return invalid HTML
+\item Contain dead intra-application links
+\item Have mismatches between HTML forms and the fields expected by their handlers
+\item Attempt invalid SQL queries
+\item Use improper marshaling or unmarshaling in communication with SQL databases
+\end{itemize}
+
+This type safety is just the foundation of the Ur/Web methodology. It is also possible to use metaprogramming to build significant application pieces by analysis of type structure. For instance, the demo includes an ML-style functor for building an admin interface for an arbitrary SQL table. The type system guarantees that the admin interface sub-application that comes out will always be free of the above-listed bugs, no matter which well-typed table description is given as input.
+
+The Ur/Web compiler also produces very efficient object code that does not use garbage collection. These compiled programs will often be even more efficient than what most programmers would bother to write in C.
+
+\medskip
+
+The official web site for Ur is:
+\begin{center}
+ \url{http://www.impredicative.com/ur/}
+\end{center}
+
\section{Ur Syntax}
In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for tables, sequences, and cookies.
--
cgit v1.2.3
From d3a3f5f7e087580215f82afe90a4f64f1a75ebc1 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 9 Dec 2008 11:40:51 -0500
Subject: Installation
---
doc/manual.tex | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 55 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 8d507792..942cee77 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -44,6 +44,61 @@ The official web site for Ur is:
\url{http://www.impredicative.com/ur/}
\end{center}
+
+\section{Installation}
+
+If you are lucky, then the following standard command sequence will suffice for installation, in a directory to which you have unpacked the latest distribution tarball.
+
+\begin{verbatim}
+./configure
+make
+sudo make install
+\end{verbatim}
+
+Some other packages must be installed for the above to work. At a minimum, you need a standard UNIX shell, with standard UNIX tools like sed and GCC in your execution path; and MLton, the whole-program optimizing compiler for Standard ML. To build programs that access SQL databases, you also need libpq, the PostgreSQL client library. As of this writing, in the ``testing'' version of Debian Linux, this command will install the more uncommon of these dependencies:
+
+\begin{verbatim}
+apt-get install mlton libpq-dev
+\end{verbatim}
+
+It is also possible to access the modules of the Ur/Web compiler interactively, within Standard ML of New Jersey. To install the prerequisites in Debian testing:
+
+\begin{verbatim}
+apt-get install smlnj libsmlnj-smlnj ml-yacc ml-lpt
+\end{verbatim}
+
+To begin an interactive session with the Ur compiler modules, run \texttt{make smlnj}, and then, from within an \texttt{sml} session, run \texttt{CM.make "src/urweb.cm";}. The \texttt{Compiler} module is the main entry point.
+
+To run an SQL-backed application, you will probably want to install the PostgreSQL server. Version 8.3 or higher is required.
+
+\begin{verbatim}
+apt-get install postgresql-8.3
+\end{verbatim}
+
+To use the Emacs mode, you must have a modern Emacs installed. We assume that you already know how to do this, if you're in the business of looking for an Emacs mode. The demo generation facility of the compiler will also call out to Emacs to syntax-highlight code, and that process depends on the \texttt{htmlize} module, which can be installed in Debian testing via:
+
+\begin{verbatim}
+apt-get install emacs-goodies-el
+\end{verbatim}
+
+Even with the right packages installed, configuration and building might fail to work. After you run \texttt{./configure}, you will see the values of some named environment variables printed. You may need to adjust these values to get proper installation for your system. To change a value, store your preferred alternative in the corresponding UNIX environment variable, before running \texttt{./configure}. For instance, here is how to change the list of extra arguments that the Ur/Web compiler will pass to GCC on every invocation.
+
+\begin{verbatim}
+GCCARGS=-fnested-functions ./configure
+\end{verbatim}
+
+Some OSX users have reported needing to use this particular GCCARGS value.
+
+The Emacs mode can be set to autoload by adding the following to your \texttt{.emacs} file.
+
+\begin{verbatim}
+(add-to-list 'load-path "/usr/local/share/emacs/site-lisp/urweb-mode")
+(load "urweb-mode-startup")
+\end{verbatim}
+
+Change the path in the first line if you chose a different Emacs installation path during configuration.
+
+
\section{Ur Syntax}
In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for tables, sequences, and cookies.
--
cgit v1.2.3
From 5bb4dcc90dc61ef431539e049b160e2971cf4621 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 9 Dec 2008 11:52:56 -0500
Subject: .urp files
---
doc/manual.tex | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 942cee77..141c4b45 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -99,6 +99,36 @@ The Emacs mode can be set to autoload by adding the following to your \texttt{.e
Change the path in the first line if you chose a different Emacs installation path during configuration.
+\section{Command-Line Compiler}
+
+\subsection{Project Files}
+
+The basic inputs to the \texttt{urweb} compiler are project files, which have the extension \texttt{.urp}. Here is a sample \texttt{.urp} file.
+
+\begin{verbatim}
+database dbname=test
+sql crud1.sql
+
+crud
+crud1
+\end{verbatim}
+
+The \texttt{database} line gives the database information string to pass to libpq. In this case, the string only says to connect to a local database named \texttt{test}.
+
+The \texttt{sql} line asks for an SQL source file to be generated, giving the commands to run to create the tables and sequences that this application expects to find. After building this \texttt{.urp} file, the following commands could be used to initialize the database, assuming that the current UNIX user exists as a Postgres user with database creation privileges:
+
+\begin{verbatim}
+createdb test
+psql -f crud1.sql test
+\end{verbatim}
+
+A blank line always separates the named directives from a list of modules to include in the project; if there are no named directives, a blank line must begin the file.
+
+For each entry \texttt{M} in the module list, the file \texttt{M.urs} is included in the project if it exists, and the file \texttt{M.ur} must exist and is always included.
+
+A few other named directives are supported. \texttt{prefix PREFIX} sets the prefix included before every URI within the generated application; the default is \texttt{/}. \texttt{exe FILENAME} sets the filename to which to write the output executable; the default for file \texttt{P.urp} is \texttt{P.exe}. \texttt{debug} saves some intermediate C files, which is mostly useful to help in debugging the compiler itself. \texttt{profile} generates an executable that may be used with gprof.
+
+
\section{Ur Syntax}
In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for tables, sequences, and cookies.
--
cgit v1.2.3
From 86360921e7d299c1e20c0adc5d382f70b64b822f Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 9 Dec 2008 11:57:17 -0500
Subject: Building an application
---
doc/manual.tex | 12 ++++++++++++
1 file changed, 12 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 141c4b45..9255fc87 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -128,6 +128,18 @@ For each entry \texttt{M} in the module list, the file \texttt{M.urs} is include
A few other named directives are supported. \texttt{prefix PREFIX} sets the prefix included before every URI within the generated application; the default is \texttt{/}. \texttt{exe FILENAME} sets the filename to which to write the output executable; the default for file \texttt{P.urp} is \texttt{P.exe}. \texttt{debug} saves some intermediate C files, which is mostly useful to help in debugging the compiler itself. \texttt{profile} generates an executable that may be used with gprof.
+\subsection{Building an Application}
+
+To compile project \texttt{P.urp}, simply run
+\begin{verbatim}
+urweb P
+\end{verbatim}
+
+To time how long the different compiler phases run, without generating an executable, run
+\begin{verbatim}
+urweb -timing P
+\end{verbatim}
+
\section{Ur Syntax}
--
cgit v1.2.3
From 55fefa6122803e9739e9e71f1d50eae671665df4 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 9 Dec 2008 14:06:51 -0500
Subject: Proofreading pass
---
doc/manual.tex | 45 ++++++++++++++++++++++-----------------------
1 file changed, 22 insertions(+), 23 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 9255fc87..3c97b720 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -134,6 +134,7 @@ To compile project \texttt{P.urp}, simply run
\begin{verbatim}
urweb P
\end{verbatim}
+The output executable is a standalone web server. Run it with the command-line argument \texttt{-h} to see which options it takes. If the project file lists a database, the web server will attempt to connect to that database on startup.
To time how long the different compiler phases run, without generating an executable, run
\begin{verbatim}
@@ -188,7 +189,7 @@ $$\begin{array}{rrcll}
Ur supports several different notions of functions that take types as arguments. These arguments can be either implicit, causing them to be inferred at use sites; or explicit, forcing them to be specified manually at use sites. There is a common explicitness annotation convention applied at the definitions of and in the types of such functions.
$$\begin{array}{rrcll}
\textrm{Explicitness} & ? &::=& :: & \textrm{explicit} \\
- &&& \; ::: & \textrm{implicit}
+ &&& ::: & \textrm{implicit}
\end{array}$$
\emph{Constructors} are the main class of compile-time-only data. They include proper types and are classified by kinds.
@@ -210,7 +211,7 @@ $$\begin{array}{rrcll}
&&& c \rc c & \textrm{type-level record concatenation} \\
&&& \mt{fold} & \textrm{type-level record fold} \\
\\
- &&& (c^+) & \textrm{type-level tuple} \\
+ &&& (c,^+) & \textrm{type-level tuple} \\
&&& c.n & \textrm{type-level tuple projection ($n \in \mathbb N^+$)} \\
\\
&&& \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
@@ -452,9 +453,9 @@ $$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c :: \kappa}{
We will use a keyword $\mt{map}$ as a shorthand, such that, for $f$ of kind $\kappa \to \kappa'$, $\mt{map} \; f$ stands for $\mt{fold} \; (\lambda (x_1 :: \mt{Name}) (x_2 :: \kappa) (x_3 :: \{\kappa'\}) \Rightarrow [x_1 = f \; x_2] \rc x_3) \; []$.
$$\infer{\Gamma \vdash c_1 \sim c_2}{
- \Gamma \vdash c_1 \hookrightarrow c'_1
- & \Gamma \vdash c_2 \hookrightarrow c'_2
- & \forall c''_1 \in c'_1, c''_2 \in c'_2: \Gamma \vdash c''_1 \sim c''_2
+ \Gamma \vdash c_1 \hookrightarrow C_1
+ & \Gamma \vdash c_2 \hookrightarrow C_2
+ & \forall c'_1 \in C_1, c'_2 \in C_2: \Gamma \vdash c'_1 \sim c'_2
}
\quad \infer{\Gamma \vdash X \sim X'}{
X \neq X'
@@ -462,10 +463,10 @@ $$\infer{\Gamma \vdash c_1 \sim c_2}{
$$\infer{\Gamma \vdash c_1 \sim c_2}{
c'_1 \sim c'_2 \in \Gamma
- & \Gamma \vdash c'_1 \hookrightarrow c''_1
- & \Gamma \vdash c'_2 \hookrightarrow c''_2
- & c_1 \in c''_1
- & c_2 \in c''_2
+ & \Gamma \vdash c'_1 \hookrightarrow C_1
+ & \Gamma \vdash c'_2 \hookrightarrow C_2
+ & c_1 \in C_1
+ & c_2 \in C_2
}$$
$$\infer{\Gamma \vdash c \hookrightarrow \{c\}}{}
@@ -656,8 +657,7 @@ We use an auxiliary judgment $\overline{y}; x; \Gamma \vdash \overline{dc} \lead
This is the first judgment where we deal with type classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. Section \ref{typeclasses} gives an informal description of how type classes influence type inference.
-We presuppose the existence of a function $\mathcal O$, where $\mathcal(M, \overline{s})$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature items $\overline{s}$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $S$.
-
+We presuppose the existence of a function $\mathcal O$, where $\mathcal O(M, \overline{s})$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature items $\overline{s}$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $\overline s$.
We write $\kappa_1^n \to \kappa$ as a shorthand, where $\kappa_1^0 \to \kappa = \kappa$ and $\kappa_1^{n+1} \to \kappa_2 = \kappa_1 \to (\kappa_1^n \to \kappa_2)$. We write $\mt{len}(\overline{y})$ for the length of vector $\overline{y}$ of variables.
$$\infer{\Gamma \vdash \cdot \leadsto \Gamma}{}
@@ -690,10 +690,10 @@ $$\infer{\Gamma \vdash \mt{val} \; \mt{rec} \; \overline{x : \tau = e} \leadsto
$$\infer{\Gamma \vdash \mt{structure} \; X : S = M \leadsto \Gamma, X : S}{
\Gamma \vdash M : S
- & \textrm{ ($M$ not a $\mt{struct} \; \ldots \; \mt{end}$)}
+ & \textrm{ $M$ not a constant or application}
}
-\quad \infer{\Gamma \vdash \mt{structure} \; X : S = \mt{struct} \; \overline{d} \; \mt{end} \leadsto \Gamma, X : \mt{selfify}(X, \overline{s})}{
- \Gamma \vdash \mt{struct} \; \overline{d} \; \mt{end} : \mt{sig} \; \overline{s} \; \mt{end}
+\quad \infer{\Gamma \vdash \mt{structure} \; X : S = M \leadsto \Gamma, X : \mt{selfify}(X, \overline{s})}{
+ \Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
}$$
$$\infer{\Gamma \vdash \mt{signature} \; X = S \leadsto \Gamma, X = S}{
@@ -786,7 +786,7 @@ $$\infer{\Gamma \vdash \mt{class} \; x = c \leadsto \Gamma, x :: \mt{Type} \to \
\subsection{Signature Compatibility}
-To simplify the judgments in this section, we assume that all signatures are alpha-varied as necessary to avoid including mmultiple bindings for the same identifier. This is in addition to the usual alpha-variation of locally-bound variables.
+To simplify the judgments in this section, we assume that all signatures are alpha-varied as necessary to avoid including multiple bindings for the same identifier. This is in addition to the usual alpha-variation of locally-bound variables.
We rely on a judgment $\Gamma \vdash \overline{s} \leq s'$, which expresses the occurrence in signature items $\overline{s}$ of an item compatible with $s'$. We also use a judgment $\Gamma \vdash \overline{dc} \leq \overline{dc}$, which expresses compatibility of datatype definitions.
@@ -835,7 +835,7 @@ $$\infer{\Gamma \vdash \mt{functor} (X : S_1) : S_2 \leq \mt{functor} (X : S'_1)
$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa \leq \mt{con} \; x :: \kappa}{}
\quad \infer{\Gamma \vdash \mt{con} \; x :: \kappa = c \leq \mt{con} \; x :: \kappa}{}
-\quad \infer{\Gamma \vdash \mt{datatype} \; x \; \overline{y} = \overline{dc} \leq \mt{con} \; x :: \mt{Type}}{}$$
+\quad \infer{\Gamma \vdash \mt{datatype} \; x \; \overline{y} = \overline{dc} \leq \mt{con} \; x :: \mt{Type}^{\mt{len}(\overline y)} \to \mt{Type}}{}$$
$$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leq \mt{con} \; x :: \mt{Type}^{\mt{len}(\overline{y})} \to \mt{Type}}{
\Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
@@ -946,10 +946,9 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{sigOf}(\mt{cookie} \; x : \tau) &=& \mt{cookie} \; x : \tau \\
\mt{sigOf}(\mt{class} \; x = c) &=& \mt{class} \; x = c \\
\end{eqnarray*}
-
\begin{eqnarray*}
\mt{selfify}(M, \cdot) &=& \cdot \\
- \mt{selfify}(M, s \; \overline{s'}) &=& \mt{selfify}(M, \sigma, s) \; \mt{selfify}(M, \overline{s'}) \\
+ \mt{selfify}(M, s \; \overline{s'}) &=& \mt{selfify}(M, s) \; \mt{selfify}(M, \overline{s'}) \\
\\
\mt{selfify}(M, \mt{con} \; x :: \kappa) &=& \mt{con} \; x :: \kappa = M.x \\
\mt{selfify}(M, \mt{con} \; x :: \kappa = c) &=& \mt{con} \; x :: \kappa = c \\
@@ -984,7 +983,7 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z, \mt{val} \; X) &=& \overline{y ::: \mt{Type}} \to M.x \; \overline y \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$} \\
&& \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z = (\overline{y}, \overline{dc})$ and $X \in \overline{dc}$)} \\
\mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z, \mt{val} \; X) &=& \overline{y ::: \mt{Type}} \to \tau \to M.x \; \overline y \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$} \\
- && \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z = (\overline{y}, \overline{dc})$ and $X : \tau \in \overline{dc}$)} \\
+ && \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z = (\overline{y}, \overline{dc})$ and $X \; \mt{of} \; \tau \in \overline{dc}$)} \\
\\
\mt{proj}(M, \mt{structure} \; X : S \; \overline{s}, \mt{structure} \; X) &=& S \\
\\
@@ -1391,7 +1390,7 @@ $$\begin{array}{rrcll}
\textrm{Projections} & P &::=& \ast & \textrm{all columns} \\
&&& p,^+ & \textrm{particular columns} \\
\textrm{Pre-projections} & p &::=& t.f & \textrm{one column from a table} \\
- &&& t.\{\{c\}\} & \textrm{a record of colums from a table (of kind $\{\mt{Type}\}$)} \\
+ &&& t.\{\{c\}\} & \textrm{a record of columns from a table (of kind $\{\mt{Type}\}$)} \\
\textrm{Table names} & t &::=& x & \textrm{constant table name (automatically capitalized)} \\
&&& X & \textrm{constant table name} \\
&&& \{\{c\}\} & \textrm{computed table name (of kind $\mt{Name}$)} \\
@@ -1462,7 +1461,7 @@ When the standalone web server receives a request for a known page, it calls the
HTML forms are handled in a similar way. The $\mt{action}$ attribute of a $\mt{submit}$ form tag takes a value of type $\$\mt{use} \to \mt{transaction} \; \mt{page}$, where $\mt{use}$ is a kind-$\{\mt{Type}\}$ record of the form fields used by this action handler. Action handlers are assigned URL patterns in the same way as above.
-For both links and actions, direct arguments and local variables mentioned implicitly via closures are automatically included in serialized form in URLs, in the order in which they appeared in the source code.
+For both links and actions, direct arguments and local variables mentioned implicitly via closures are automatically included in serialized form in URLs, in the order in which they appear in the source code.
\section{Compiler Phases}
@@ -1513,11 +1512,11 @@ This phase specializes polymorphic functions to the specific arguments passed to
\subsection{Specialize}
-Replace uses of parametrized datatypes with versions specialized to specific parameters. As for Unpoly, this phase will not be effective enough in the presence of polymorphic recursion or other fancy uses of impredicative polymorphism.
+Replace uses of parameterized datatypes with versions specialized to specific parameters. As for Unpoly, this phase will not be effective enough in the presence of polymorphic recursion or other fancy uses of impredicative polymorphism.
\subsection{Shake}
-Here the compiler repeats the earlier shake phase.
+Here the compiler repeats the earlier Shake phase.
\subsection{Monoize}
--
cgit v1.2.3
From 5108a7e86734b335b65b9efd60a7f2f2797b602b Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 9 Dec 2008 14:41:19 -0500
Subject: Add SQL arithmetic operators
---
doc/manual.tex | 24 +++++++++++----------
lib/basis.urs | 30 +++++++++++++++-----------
lib/top.ur | 2 +-
src/monoize.sml | 63 +++++++++++++++++++++++++------------------------------
src/urweb.grm | 29 +++++++++++++------------
tests/sql_ops.ur | 8 +++++++
tests/sql_ops.urp | 6 ++++++
7 files changed, 89 insertions(+), 73 deletions(-)
create mode 100644 tests/sql_ops.ur
create mode 100644 tests/sql_ops.urp
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 3c97b720..21092735 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1198,7 +1198,7 @@ $$\begin{array}{l}
\hspace{.1in} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; (\mt{option} \; \mt{t}) \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{bool}
\end{array}$$
-We have generic nullary, unary, and binary operators, as well as comparison operators.
+We have generic nullary, unary, and binary operators.
$$\begin{array}{l}
\mt{con} \; \mt{sql\_nfunc} :: \mt{Type} \to \mt{Type} \\
\mt{val} \; \mt{sql\_current\_timestamp} : \mt{sql\_nfunc} \; \mt{time} \\
@@ -1221,16 +1221,16 @@ $$\begin{array}{l}
\end{array}$$
$$\begin{array}{l}
- \mt{type} \; \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_eq} : \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_ne} : \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_lt} : \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_le} : \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_gt} : \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_ge} : \mt{sql\_comparison} \\
- \mt{val} \; \mt{sql\_comparison} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \\
- \hspace{.1in} \to \mt{sql\_comparison} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{bool}
- \end{array}$$
+ \mt{class} \; \mt{sql\_arith} \\
+ \mt{val} \; \mt{sql\_int\_arith} : \mt{sql\_arith} \; \mt{int} \\
+ \mt{val} \; \mt{sql\_float\_arith} : \mt{sql\_arith} \; \mt{float} \\
+ \mt{val} \; \mt{sql\_neg} : \mt{t} ::: \mt{Type} \to \mt{sql\_arith} \; \mt{t} \to \mt{sql\_unary} \; \mt{t} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_plus} : \mt{t} ::: \mt{Type} \to \mt{sql\_arith} \; \mt{t} \to \mt{sql\_binary} \; \mt{t} \; \mt{t} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_minus} : \mt{t} ::: \mt{Type} \to \mt{sql\_arith} \; \mt{t} \to \mt{sql\_binary} \; \mt{t} \; \mt{t} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_times} : \mt{t} ::: \mt{Type} \to \mt{sql\_arith} \; \mt{t} \to \mt{sql\_binary} \; \mt{t} \; \mt{t} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_div} : \mt{t} ::: \mt{Type} \to \mt{sql\_arith} \; \mt{t} \to \mt{sql\_binary} \; \mt{t} \; \mt{t} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_mod} : \mt{sql\_binary} \; \mt{int} \; \mt{int} \; \mt{int}
+\end{array}$$
Finally, we have aggregate functions. The $\mt{COUNT(\ast)}$ syntax is handled specially, since it takes no real argument. The other aggregate functions are placed into a general type family, using type classes to restrict usage to properly-typed arguments. The key aspect of the $\mt{sql\_aggregate}$ function's type is the shift of aggregate-function-only fields into unrestricted fields.
@@ -1445,6 +1445,8 @@ $$\begin{array}{rrcll}
\textrm{XML pieces} & l &::=& \textrm{text} & \textrm{cdata} \\
&&& \texttt{<}g\texttt{/>} & \textrm{tag with no children} \\
   &&& \texttt{<}g\texttt{>}l^*\texttt{</}x\texttt{>} & \textrm{tag with children} \\
+ &&& \{e\} & \textrm{computed XML fragment} \\
+ &&& \{[e]\} & \textrm{injection of an Ur expression, via the $\mt{Top}.\mt{txt}$ function} \\
\textrm{Tag} & g &::=& h \; (x = v)^* \\
\textrm{Tag head} & h &::=& x & \textrm{tag name} \\
&&& h\{c\} & \textrm{constructor parameter} \\
diff --git a/lib/basis.urs b/lib/basis.urs
index 9681328f..eb2a6d29 100644
--- a/lib/basis.urs
+++ b/lib/basis.urs
@@ -202,6 +202,10 @@ val sql_is_null : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
-> sql_exp tables agg exps (option t)
-> sql_exp tables agg exps bool
+class sql_arith
+val sql_int_arith : sql_arith int
+val sql_float_arith : sql_arith float
+
con sql_unary :: Type -> Type -> Type
val sql_not : sql_unary bool bool
val sql_unary : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
@@ -209,6 +213,8 @@ val sql_unary : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
-> sql_unary arg res -> sql_exp tables agg exps arg
-> sql_exp tables agg exps res
+val sql_neg : t ::: Type -> sql_arith t -> sql_unary t t
+
con sql_binary :: Type -> Type -> Type -> Type
val sql_and : sql_binary bool bool bool
val sql_or : sql_binary bool bool bool
@@ -218,18 +224,18 @@ val sql_binary : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
-> sql_exp tables agg exps arg2
-> sql_exp tables agg exps res
-type sql_comparison
-val sql_eq : sql_comparison
-val sql_ne : sql_comparison
-val sql_lt : sql_comparison
-val sql_le : sql_comparison
-val sql_gt : sql_comparison
-val sql_ge : sql_comparison
-val sql_comparison : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
- -> t ::: Type
- -> sql_comparison
- -> sql_exp tables agg exps t -> sql_exp tables agg exps t
- -> sql_exp tables agg exps bool
+val sql_plus : t ::: Type -> sql_arith t -> sql_binary t t t
+val sql_minus : t ::: Type -> sql_arith t -> sql_binary t t t
+val sql_times : t ::: Type -> sql_arith t -> sql_binary t t t
+val sql_div : t ::: Type -> sql_arith t -> sql_binary t t t
+val sql_mod : sql_binary int int int
+
+val sql_eq : t ::: Type -> sql_binary t t bool
+val sql_ne : t ::: Type -> sql_binary t t bool
+val sql_lt : t ::: Type -> sql_binary t t bool
+val sql_le : t ::: Type -> sql_binary t t bool
+val sql_gt : t ::: Type -> sql_binary t t bool
+val sql_ge : t ::: Type -> sql_binary t t bool
val sql_count : tables ::: {{Type}} -> agg ::: {{Type}} -> exps ::: {Type}
-> sql_exp tables agg exps int
diff --git a/lib/top.ur b/lib/top.ur
index 76fe73c1..fd7676a3 100644
--- a/lib/top.ur
+++ b/lib/top.ur
@@ -238,4 +238,4 @@ fun eqNullable' (tables ::: {{Type}}) (agg ::: {{Type}}) (exps ::: {Type})
(e2 : option t) =
case e2 of
None => (SQL {e1} IS NULL)
- | Some _ => sql_comparison sql_eq e1 (@sql_inject inj e2)
+ | Some _ => sql_binary sql_eq e1 (@sql_inject inj e2)
diff --git a/src/monoize.sml b/src/monoize.sml
index cd20e366..1880c57d 100644
--- a/src/monoize.sml
+++ b/src/monoize.sml
@@ -165,14 +165,14 @@ fun monoType env =
(L'.TFfi ("Basis", "string"), loc)
| L.CApp ((L.CApp ((L.CApp ((L.CFfi ("Basis", "sql_binary"), _), _), _), _), _), _) =>
(L'.TFfi ("Basis", "string"), loc)
- | L.CFfi ("Basis", "sql_comparison") =>
- (L'.TFfi ("Basis", "string"), loc)
| L.CApp ((L.CFfi ("Basis", "sql_aggregate"), _), t) =>
(L'.TFfi ("Basis", "string"), loc)
| L.CApp ((L.CFfi ("Basis", "sql_summable"), _), _) =>
(L'.TRecord [], loc)
| L.CApp ((L.CFfi ("Basis", "sql_maxable"), _), _) =>
(L'.TRecord [], loc)
+ | L.CApp ((L.CFfi ("Basis", "sql_arith"), _), _) =>
+ (L'.TRecord [], loc)
| L.CApp ((L.CFfi ("Basis", "sql_nfunc"), _), _) =>
(L'.TFfi ("Basis", "string"), loc)
@@ -1369,19 +1369,34 @@ fun monoExp (env, st, fm) (all as (e, loc)) =
fm)
end
- | L.EFfi ("Basis", "sql_eq") =>
+ | L.ECApp ((L.EFfi ("Basis", "sql_eq"), _), _) =>
((L'.EPrim (Prim.String "="), loc), fm)
- | L.EFfi ("Basis", "sql_ne") =>
+ | L.ECApp ((L.EFfi ("Basis", "sql_ne"), _), _) =>
((L'.EPrim (Prim.String "<>"), loc), fm)
- | L.EFfi ("Basis", "sql_lt") =>
+ | L.ECApp ((L.EFfi ("Basis", "sql_lt"), _), _) =>
((L'.EPrim (Prim.String "<"), loc), fm)
- | L.EFfi ("Basis", "sql_le") =>
+ | L.ECApp ((L.EFfi ("Basis", "sql_le"), _), _) =>
((L'.EPrim (Prim.String "<="), loc), fm)
- | L.EFfi ("Basis", "sql_gt") =>
+ | L.ECApp ((L.EFfi ("Basis", "sql_gt"), _), _) =>
((L'.EPrim (Prim.String ">"), loc), fm)
- | L.EFfi ("Basis", "sql_ge") =>
+ | L.ECApp ((L.EFfi ("Basis", "sql_ge"), _), _) =>
((L'.EPrim (Prim.String ">="), loc), fm)
+ | L.ECApp ((L.EFfi ("Basis", "sql_plus"), _), _) =>
+ ((L'.EAbs ("_", (L'.TRecord [], loc), (L'.TFfi ("Basis", "string"), loc),
+ (L'.EPrim (Prim.String "+"), loc)), loc), fm)
+ | L.ECApp ((L.EFfi ("Basis", "sql_minus"), _), _) =>
+ ((L'.EAbs ("_", (L'.TRecord [], loc), (L'.TFfi ("Basis", "string"), loc),
+ (L'.EPrim (Prim.String "-"), loc)), loc), fm)
+ | L.ECApp ((L.EFfi ("Basis", "sql_times"), _), _) =>
+ ((L'.EAbs ("_", (L'.TRecord [], loc), (L'.TFfi ("Basis", "string"), loc),
+ (L'.EPrim (Prim.String "*"), loc)), loc), fm)
+ | L.ECApp ((L.EFfi ("Basis", "sql_div"), _), _) =>
+ ((L'.EAbs ("_", (L'.TRecord [], loc), (L'.TFfi ("Basis", "string"), loc),
+ (L'.EPrim (Prim.String "/"), loc)), loc), fm)
+ | L.EFfi ("Basis", "sql_mod") =>
+ ((L'.EPrim (Prim.String "%"), loc), fm)
+
| L.ECApp (
(L.ECApp (
(L.ECApp (
@@ -1407,6 +1422,9 @@ fun monoExp (env, st, fm) (all as (e, loc)) =
fm)
end
| L.EFfi ("Basis", "sql_not") => ((L'.EPrim (Prim.String "NOT"), loc), fm)
+ | L.ECApp ((L.EFfi ("Basis", "sql_neg"), _), _) =>
+ ((L'.EAbs ("_", (L'.TRecord [], loc), (L'.TFfi ("Basis", "string"), loc),
+ (L'.EPrim (Prim.String "-"), loc)), loc), fm)
| L.ECApp (
(L.ECApp (
@@ -1440,32 +1458,6 @@ fun monoExp (env, st, fm) (all as (e, loc)) =
| L.EFfi ("Basis", "sql_and") => ((L'.EPrim (Prim.String "AND"), loc), fm)
| L.EFfi ("Basis", "sql_or") => ((L'.EPrim (Prim.String "OR"), loc), fm)
- | L.ECApp (
- (L.ECApp (
- (L.ECApp (
- (L.ECApp (
- (L.EFfi ("Basis", "sql_comparison"), _),
- _), _),
- _), _),
- _), _),
- _) =>
- let
- val s = (L'.TFfi ("Basis", "string"), loc)
- fun sc s = (L'.EPrim (Prim.String s), loc)
- in
- ((L'.EAbs ("c", s, (L'.TFun (s, (L'.TFun (s, s), loc)), loc),
- (L'.EAbs ("e1", s, (L'.TFun (s, s), loc),
- (L'.EAbs ("e2", s, s,
- strcat loc [sc "(",
- (L'.ERel 1, loc),
- sc " ",
- (L'.ERel 2, loc),
- sc " ",
- (L'.ERel 0, loc),
- sc ")"]), loc)), loc)), loc),
- fm)
- end
-
| L.ECApp (
(L.ECApp (
(L.ECApp (
@@ -1566,6 +1558,9 @@ fun monoExp (env, st, fm) (all as (e, loc)) =
(L'.EPrim (Prim.String "SUM"), loc)), loc),
fm)
+      | L.EFfi ("Basis", "sql_int_arith") => ((L'.ERecord [], loc), fm)
+      | L.EFfi ("Basis", "sql_float_arith") => ((L'.ERecord [], loc), fm)
+
| L.EFfi ("Basis", "sql_maxable_int") => ((L'.ERecord [], loc), fm)
| L.EFfi ("Basis", "sql_maxable_float") => ((L'.ERecord [], loc), fm)
| L.EFfi ("Basis", "sql_maxable_string") => ((L'.ERecord [], loc), fm)
diff --git a/src/urweb.grm b/src/urweb.grm
index 3d77905e..7798b018 100644
--- a/src/urweb.grm
+++ b/src/urweb.grm
@@ -119,15 +119,6 @@ fun amend_group loc (gi, tabs) =
fun sql_inject (v, loc) =
(EApp ((EVar (["Basis"], "sql_inject", Infer), loc), (v, loc)), loc)
-fun sql_compare (oper, sqlexp1, sqlexp2, loc) =
- let
- val e = (EVar (["Basis"], "sql_comparison", Infer), loc)
- val e = (EApp (e, (EVar (["Basis"], "sql_" ^ oper, Infer), loc)), loc)
- val e = (EApp (e, sqlexp1), loc)
- in
- (EApp (e, sqlexp2), loc)
- end
-
fun sql_binary (oper, sqlexp1, sqlexp2, loc) =
let
val e = (EVar (["Basis"], "sql_binary", Infer), loc)
@@ -1239,16 +1230,24 @@ sqlexp : TRUE (sql_inject (EVar (["Basis"], "True", In
| LBRACE eexp RBRACE (eexp)
- | sqlexp EQ sqlexp (sql_compare ("eq", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
- | sqlexp NE sqlexp (sql_compare ("ne", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
- | sqlexp LT sqlexp (sql_compare ("lt", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
- | sqlexp LE sqlexp (sql_compare ("le", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
- | sqlexp GT sqlexp (sql_compare ("gt", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
- | sqlexp GE sqlexp (sql_compare ("ge", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp EQ sqlexp (sql_binary ("eq", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp NE sqlexp (sql_binary ("ne", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp LT sqlexp (sql_binary ("lt", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp LE sqlexp (sql_binary ("le", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp GT sqlexp (sql_binary ("gt", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp GE sqlexp (sql_binary ("ge", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+
+ | sqlexp PLUS sqlexp (sql_binary ("plus", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp MINUS sqlexp (sql_binary ("minus", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp STAR sqlexp (sql_binary ("times", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp DIVIDE sqlexp (sql_binary ("div", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+ | sqlexp MOD sqlexp (sql_binary ("mod", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
| sqlexp CAND sqlexp (sql_binary ("and", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
| sqlexp OR sqlexp (sql_binary ("or", sqlexp1, sqlexp2, s (sqlexp1left, sqlexp2right)))
+
| NOT sqlexp (sql_unary ("not", sqlexp, s (NOTleft, sqlexpright)))
+ | MINUS sqlexp (sql_unary ("neg", sqlexp, s (MINUSleft, sqlexpright)))
| sqlexp IS NULL (let
val loc = s (sqlexpleft, NULLright)
diff --git a/tests/sql_ops.ur b/tests/sql_ops.ur
new file mode 100644
index 00000000..34e78775
--- /dev/null
+++ b/tests/sql_ops.ur
@@ -0,0 +1,8 @@
+table t : { A : int, B : float }
+
+val q = (SELECT t.A + t.A AS X, t.B * t.B AS Y FROM t)
+
+fun main () : transaction page =
+    xml <- queryX q (fn r => <xml>{[r.X]}, {[r.Y]}</xml>);
+    return <xml>{xml}</xml>
+
diff --git a/tests/sql_ops.urp b/tests/sql_ops.urp
new file mode 100644
index 00000000..90e47b77
--- /dev/null
+++ b/tests/sql_ops.urp
@@ -0,0 +1,6 @@
+debug
+database dbname=sql_ops
+sql sql_ops.sql
+exe /tmp/webapp
+
+sql_ops
--
cgit v1.2.3
From 5d92ee7289e6df76694bebaa585160e5b3c79013 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 9 Dec 2008 14:43:43 -0500
Subject: Spell check
---
doc/manual.tex | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 21092735..930fd9f9 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1009,9 +1009,9 @@ The Ur/Web compiler uses \emph{heuristic type inference}, with no claims of comp
\subsection{Basic Unification}
-Type-checkers for languages based on the Hindly-Milner type discipline, like ML and Haskell, take advantage of \emph{principal typing} properties, making complete type inference relatively straightforward. Inference algorithms are traditionally implemented using type unification variables, at various points asserting equalities between types, in the process discovering the values of type variables. The Ur/Web compiler uses the same basic strategy, but the complexity of the type system rules out easy completeness.
+Type-checkers for languages based on the Hindley-Milner type discipline, like ML and Haskell, take advantage of \emph{principal typing} properties, making complete type inference relatively straightforward. Inference algorithms are traditionally implemented using type unification variables, at various points asserting equalities between types, in the process discovering the values of type variables. The Ur/Web compiler uses the same basic strategy, but the complexity of the type system rules out easy completeness.
-Type-checking can require evaluating recursive functional programs, thanks to the type-level $\mt{fold}$ operator. When a unification variable appears in such a type, the next step of computation can be undetermined. The value of that variable might be determined later, but this would be ``too late'' for the unification problems generated at the first occurrence. This is the essential source of incompletness.
+Type-checking can require evaluating recursive functional programs, thanks to the type-level $\mt{fold}$ operator. When a unification variable appears in such a type, the next step of computation can be undetermined. The value of that variable might be determined later, but this would be ``too late'' for the unification problems generated at the first occurrence. This is the essential source of incompleteness.
Nonetheless, the unification engine tends to do reasonably well. Unlike in ML, polymorphism is never inferred in definitions; it must be indicated explicitly by writing out constructor-level parameters. By writing these and other annotations, the programmer can generally get the type inference engine to do most of the type reconstruction work.
@@ -1155,7 +1155,7 @@ $$\begin{array}{l}
\mt{val} \; \mt{sql\_subset\_all} : \mt{tables} :: \{\{\mt{Type}\}\} \to \mt{sql\_subset} \; \mt{tables} \; \mt{tables}
\end{array}$$
-SQL expressions are used in several places, including $\mt{SELECT}$, $\mt{WHERE}$, $\mt{HAVING}$, and $\mt{ORDER} \; \mt{BY}$ clauses. They reify a fragment of the standard SQL expression language, while making it possible to inject ``native'' Ur values in some places. The arguments to the $\mt{sql\_exp}$ type family respectively give the unrestricted-availablity table fields, the table fields that may only be used in arguments to aggregate functions, the available selected expressions, and the type of the expression.
+SQL expressions are used in several places, including $\mt{SELECT}$, $\mt{WHERE}$, $\mt{HAVING}$, and $\mt{ORDER} \; \mt{BY}$ clauses. They reify a fragment of the standard SQL expression language, while making it possible to inject ``native'' Ur values in some places. The arguments to the $\mt{sql\_exp}$ type family respectively give the unrestricted-availability table fields, the table fields that may only be used in arguments to aggregate functions, the available selected expressions, and the type of the expression.
$$\begin{array}{l}
\mt{con} \; \mt{sql\_exp} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \{\mt{Type}\} \to \mt{Type} \to \mt{Type}
\end{array}$$
--
cgit v1.2.3
From 65428eeb2cba9807043188bfddf5fbfd1bf9296b Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 20 Dec 2008 18:24:12 -0500
Subject: Typo report from megacz
---
doc/manual.tex | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 930fd9f9..af905574 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1017,7 +1017,7 @@ Nonetheless, the unification engine tends to do reasonably well. Unlike in ML,
\subsection{Unifying Record Types}
-The type inference engine tries to take advantage of the algebraic rules governing type-level records, as shown in Section \ref{definitional}. When two constructors of record kind are unified, they are reduce to normal forms, with like terms crossed off from each normal form until, hopefully, nothing remains. This cannot be complete, with the inclusion of unification variables. The type-checker can help you understand what goes wrong when the process fails, as it outputs the unmatched remainders of the two normal forms.
+The type inference engine tries to take advantage of the algebraic rules governing type-level records, as shown in Section \ref{definitional}. When two constructors of record kind are unified, they are reduced to normal forms, with like terms crossed off from each normal form until, hopefully, nothing remains. This cannot be complete, with the inclusion of unification variables. The type-checker can help you understand what goes wrong when the process fails, as it outputs the unmatched remainders of the two normal forms.
\subsection{\label{typeclasses}Type Classes}
--
cgit v1.2.3
From 9030684acadec34adb8f08547dffe250ff4449d6 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Wed, 24 Dec 2008 10:48:31 -0500
Subject: More manual bug reports from megacz
---
doc/manual.tex | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index af905574..0e756426 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -435,11 +435,11 @@ $$\infer{\Gamma \vdash [\overline{c_i = c'_i}] :: \{\kappa\}}{
 $$\infer{\Gamma \vdash \mt{fold} :: (\mt{Name} \to \kappa_1 \to \kappa_2 \to \kappa_2) \to \kappa_2 \to \{\kappa_1\} \to \kappa_2}{}$$
-$$\infer{\Gamma \vdash (\overline c) :: (k_1 \times \ldots \times k_n)}{
- \forall i: \Gamma \vdash c_i :: k_i
+$$\infer{\Gamma \vdash (\overline c) :: (\kappa_1 \times \ldots \times \kappa_n)}{
+ \forall i: \Gamma \vdash c_i :: \kappa_i
}
-\quad \infer{\Gamma \vdash c.i :: k_i}{
- \Gamma \vdash c :: (k_1 \times \ldots \times k_n)
+\quad \infer{\Gamma \vdash c.i :: \kappa_i}{
+ \Gamma \vdash c :: (\kappa_1 \times \ldots \times \kappa_n)
}$$
$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c :: \kappa}{
@@ -584,6 +584,7 @@ $$\infer{\Gamma \vdash \{\overline{c = e}\} : \{\overline{c : \tau}\}}{
\quad \infer{\Gamma \vdash e_1 \rc e_2 : \$(c_1 \rc c_2)}{
\Gamma \vdash e_1 : \$c_1
& \Gamma \vdash e_2 : \$c_2
+ & \Gamma \vdash c_1 \sim c_2
}$$
$$\infer{\Gamma \vdash e \rcut c : \$c'}{
@@ -609,7 +610,7 @@ $$\infer{\Gamma \vdash \mt{let} \; \overline{ed} \; \mt{in} \; e \; \mt{end} : \
& \Gamma_i \vdash e_i : \tau
}$$
-$$\infer{\Gamma \vdash [c_1 \sim c_2] \Rightarrow e : [c_1 \sim c_2] \Rightarrow \tau}{
+$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow e : \lambda [c_1 \sim c_2] \Rightarrow \tau}{
\Gamma \vdash c_1 :: \{\kappa\}
& \Gamma \vdash c_2 :: \{\kappa\}
& \Gamma, c_1 \sim c_2 \vdash e : \tau
--
cgit v1.2.3
From d6e16e63172af6e1423df382e359cc9607325042 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 10:16:59 -0400
Subject: Revising manual, through main syntax section
---
doc/manual.tex | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 0e756426..fa6a113f 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -29,13 +29,14 @@
\item Return invalid HTML
\item Contain dead intra-application links
\item Have mismatches between HTML forms and the fields expected by their handlers
+\item Include client-side code that makes incorrect assumptions about the ``AJAX''-style services that the remote web server provides
\item Attempt invalid SQL queries
-\item Use improper marshaling or unmarshaling in communication with SQL databases
+\item Use improper marshaling or unmarshaling in communication with SQL databases or between browsers and web servers
\end{itemize}
This type safety is just the foundation of the Ur/Web methodology. It is also possible to use metaprogramming to build significant application pieces by analysis of type structure. For instance, the demo includes an ML-style functor for building an admin interface for an arbitrary SQL table. The type system guarantees that the admin interface sub-application that comes out will always be free of the above-listed bugs, no matter which well-typed table description is given as input.
-The Ur/Web compiler also produces very efficient object code that does not use garbage collection. These compiled programs will often be even more efficient than what most programmers would bother to write in C.
+The Ur/Web compiler also produces very efficient object code that does not use garbage collection. These compiled programs will often be even more efficient than what most programmers would bother to write in C. The compiler also generates JavaScript versions of client-side code, with no need to write those parts of applications in a different language.
\medskip
@@ -154,9 +155,11 @@ We give the Ur language definition in \LaTeX $\;$ math mode, since that is prett
\begin{tabular}{rl}
\textbf{\LaTeX} & \textbf{ASCII} \\
$\to$ & \cd{->} \\
+ $\longrightarrow$ & \cd{-->} \\
$\times$ & \cd{*} \\
$\lambda$ & \cd{fn} \\
$\Rightarrow$ & \cd{=>} \\
+ $\Longrightarrow$ & \cd{==>} \\
$\neq$ & \cd{<>} \\
$\leq$ & \cd{<=} \\
$\geq$ & \cd{>=} \\
@@ -182,6 +185,8 @@ $$\begin{array}{rrcll}
&&& \kappa \to \kappa & \textrm{type-level functions} \\
&&& \{\kappa\} & \textrm{type-level records} \\
&&& (\kappa\times^+) & \textrm{type-level tuples} \\
+ &&& X & \textrm{variable} \\
+ &&& X \longrightarrow \kappa & \textrm{kind-polymorphic type-level function} \\
&&& \_\_ & \textrm{wildcard} \\
&&& (\kappa) & \textrm{explicit precedence} \\
\end{array}$$
@@ -199,22 +204,25 @@ $$\begin{array}{rrcll}
\\
&&& \tau \to \tau & \textrm{function type} \\
&&& x \; ? \; \kappa \to \tau & \textrm{polymorphic function type} \\
+ &&& X \longrightarrow \tau & \textrm{kind-polymorphic function type} \\
&&& \$ c & \textrm{record type} \\
\\
&&& c \; c & \textrm{type-level function application} \\
&&& \lambda x \; :: \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
\\
+ &&& X \Longrightarrow c & \textrm{type-level kind-polymorphic function abstraction} \\
+ \\
&&& () & \textrm{type-level unit} \\
&&& \#X & \textrm{field name} \\
\\
&&& [(c = c)^*] & \textrm{known-length type-level record} \\
&&& c \rc c & \textrm{type-level record concatenation} \\
- &&& \mt{fold} & \textrm{type-level record fold} \\
+ &&& \mt{map} & \textrm{type-level record map} \\
\\
&&& (c,^+) & \textrm{type-level tuple} \\
&&& c.n & \textrm{type-level tuple projection ($n \in \mathbb N^+$)} \\
\\
- &&& \lambda [c \sim c] \Rightarrow c & \textrm{guarded constructor} \\
+ &&& [c \sim c] \Rightarrow \tau & \textrm{guarded type} \\
\\
&&& \_ :: \kappa & \textrm{wildcard} \\
&&& (c) & \textrm{explicit precedence} \\
@@ -273,13 +281,13 @@ $$\begin{array}{rrcll}
&&& \lambda x : \tau \Rightarrow e & \textrm{function abstraction} \\
&&& e [c] & \textrm{polymorphic function application} \\
&&& \lambda x \; ? \; \kappa \Rightarrow e & \textrm{polymorphic function abstraction} \\
+ &&& X \Longrightarrow e & \textrm{kind-polymorphic function abstraction} \\
\\
&&& \{(c = e,)^*\} & \textrm{known-length record} \\
&&& e.c & \textrm{record field projection} \\
&&& e \rc e & \textrm{record concatenation} \\
&&& e \rcut c & \textrm{removal of a single record field} \\
&&& e \rcutM c & \textrm{removal of multiple record fields} \\
- &&& \mt{fold} & \textrm{fold over fields of a type-level record} \\
\\
&&& \mt{let} \; ed^* \; \mt{in} \; e \; \mt{end} & \textrm{local definitions} \\
\\
--
cgit v1.2.3
From d3248ccc1d79b3a18704fd5549371c20e3f7bada Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 10:38:13 -0400
Subject: Revise manual, through end of Syntax
---
doc/manual.tex | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index fa6a113f..d8578168 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -248,8 +248,8 @@ $$\begin{array}{rrcll}
&&& \mt{signature} \; X = S & \textrm{sub-signature} \\
&&& \mt{include} \; S & \textrm{signature inclusion} \\
&&& \mt{constraint} \; c \sim c & \textrm{record disjointness constraint} \\
- &&& \mt{class} \; x & \textrm{abstract type class} \\
- &&& \mt{class} \; x = c & \textrm{concrete type class} \\
+ &&& \mt{class} \; x :: \kappa & \textrm{abstract constructor class} \\
+ &&& \mt{class} \; x :: \kappa = c & \textrm{concrete constructor class} \\
\\
\textrm{Datatype constructors} & dc &::=& X & \textrm{nullary constructor} \\
&&& X \; \mt{of} \; \tau & \textrm{unary constructor} \\
@@ -293,7 +293,8 @@ $$\begin{array}{rrcll}
\\
&&& \mt{case} \; e \; \mt{of} \; (p \Rightarrow e|)^+ & \textrm{pattern matching} \\
\\
- &&& \lambda [c \sim c] \Rightarrow e & \textrm{guarded expression} \\
+ &&& \lambda [c \sim c] \Rightarrow e & \textrm{guarded expression abstraction} \\
+ &&& e \; ! & \textrm{guarded expression application} \\
\\
&&& \_ & \textrm{wildcard} \\
&&& (e) & \textrm{explicit precedence} \\
@@ -317,7 +318,7 @@ $$\begin{array}{rrcll}
&&& \mt{table} \; x : c & \textrm{SQL table} \\
&&& \mt{sequence} \; x & \textrm{SQL sequence} \\
&&& \mt{cookie} \; x : \tau & \textrm{HTTP cookie} \\
- &&& \mt{class} \; x = c & \textrm{concrete type class} \\
+ &&& \mt{class} \; x :: \kappa = c & \textrm{concrete constructor class} \\
\\
\textrm{Modules} & M &::=& \mt{struct} \; d^* \; \mt{end} & \textrm{constant} \\
&&& X & \textrm{variable} \\
@@ -340,17 +341,17 @@ The notation $[c_1, \ldots, c_n]$ is shorthand for $[c_1 = (), \ldots, c_n = ()]
A tuple type $(\tau_1, \ldots, \tau_n)$ expands to a record type $\{1 = \tau_1, \ldots, n = \tau_n\}$, with natural numbers as field names. A tuple pattern $(p_1, \ldots, p_n)$ expands to a rigid record pattern $\{1 = p_1, \ldots, n = p_n\}$. Positive natural numbers may be used in most places where field names would be allowed.
-In general, several adjacent $\lambda$ forms may be combined into one, and kind and type annotations may be omitted, in which case they are implicitly included as wildcards. More formally, for constructor-level abstractions, we can define a new non-terminal $b ::= x \mid (x :: \kappa) \mid [c \sim c]$ and allow composite abstractions of the form $\lambda b^+ \Rightarrow c$, elaborating into the obvious sequence of one core $\lambda$ per element of $b^+$.
+In general, several adjacent $\lambda$ forms may be combined into one, and kind and type annotations may be omitted, in which case they are implicitly included as wildcards. More formally, for constructor-level abstractions, we can define a new non-terminal $b ::= x \mid (x :: \kappa) \mid X$ and allow composite abstractions of the form $\lambda b^+ \Rightarrow c$, elaborating into the obvious sequence of one core $\lambda$ per element of $b^+$.
For any signature item or declaration that defines some entity to be equal to $A$ with classification annotation $B$ (e.g., $\mt{val} \; x : B = A$), $B$ and the preceding colon (or similar punctuation) may be omitted, in which case it is filled in as a wildcard.
A signature item or declaration $\mt{type} \; x$ or $\mt{type} \; x = \tau$ is elaborated into $\mt{con} \; x :: \mt{Type}$ or $\mt{con} \; x :: \mt{Type} = \tau$, respectively.
-A signature item or declaration $\mt{class} \; x = \lambda y :: \mt{Type} \Rightarrow c$ may be abbreviated $\mt{class} \; x \; y = c$.
+A signature item or declaration $\mt{class} \; x = \lambda y \Rightarrow c$ may be abbreviated $\mt{class} \; x \; y = c$.
-Handling of implicit and explicit constructor arguments may be tweaked with some prefixes to variable references. An expression $@x$ is a version of $x$ where all implicit constructor arguments have been made explicit. An expression $@@x$ achieves the same effect, additionally halting automatic resolution of type class instances. The same syntax works for variables projected out of modules and for capitalized variables (datatype constructors).
+Handling of implicit and explicit constructor arguments may be tweaked with some prefixes to variable references. An expression $@x$ is a version of $x$ where all implicit constructor arguments have been made explicit. An expression $@@x$ achieves the same effect, additionally halting automatic resolution of type class instances and automatic proving of disjointness constraints. The default is that any prefix of a variable's type consisting only of implicit polymorphism, type class instances, and disjointness obligations is resolved automatically, with the variable treated as having the type that starts after the last implicit element, with suitable unification variables substituted. The same syntax works for variables projected out of modules and for capitalized variables (datatype constructors).
-At the expression level, an analogue is available of the composite $\lambda$ form for constructors. We define the language of binders as $b ::= x \mid (x : \tau) \mid (x \; ? \; \kappa) \mid [c \sim c]$. A lone variable $x$ as a binder stands for an expression variable of unspecified type.
+At the expression level, an analogue is available of the composite $\lambda$ form for constructors. We define the language of binders as $b ::= x \mid (x : \tau) \mid (x \; ? \; \kappa) \mid X \mid [c \sim c]$. A lone variable $x$ as a binder stands for an expression variable of unspecified type.
A $\mt{val}$ or $\mt{val} \; \mt{rec}$ declaration may include expression binders before the equal sign, following the binder grammar from the last paragraph. Such declarations are elaborated into versions that add additional $\lambda$s to the fronts of the righthand sides, as appropriate. The keyword $\mt{fun}$ is a synonym for $\mt{val} \; \mt{rec}$.
--
cgit v1.2.3
From f0ac4ec56223cd1c76a6d2b120b62373cf4f915c Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 11:18:54 -0400
Subject: Revise manual, through static semantics
---
doc/manual.tex | 126 ++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 84 insertions(+), 42 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index d8578168..d2a58042 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -211,6 +211,7 @@ $$\begin{array}{rrcll}
&&& \lambda x \; :: \; \kappa \Rightarrow c & \textrm{type-level function abstraction} \\
\\
&&& X \Longrightarrow c & \textrm{type-level kind-polymorphic function abstraction} \\
+ &&& c [\kappa] & \textrm{type-level kind-polymorphic function application} \\
\\
&&& () & \textrm{type-level unit} \\
&&& \#X & \textrm{field name} \\
@@ -231,6 +232,8 @@ $$\begin{array}{rrcll}
&&& M.x & \textrm{projection from a module} \\
\end{array}$$
+We include both abstraction and application for kind polymorphism, but applications are only inferred internally; they may not be written explicitly in source programs.
+
Modules of the module system are described by \emph{signatures}.
$$\begin{array}{rrcll}
\textrm{Signatures} & S &::=& \mt{sig} \; s^* \; \mt{end} & \textrm{constant} \\
@@ -281,6 +284,7 @@ $$\begin{array}{rrcll}
&&& \lambda x : \tau \Rightarrow e & \textrm{function abstraction} \\
&&& e [c] & \textrm{polymorphic function application} \\
&&& \lambda x \; ? \; \kappa \Rightarrow e & \textrm{polymorphic function abstraction} \\
+ &&& e [\kappa] & \textrm{kind-polymorphic function application} \\
&&& X \Longrightarrow e & \textrm{kind-polymorphic function abstraction} \\
\\
&&& \{(c = e,)^*\} & \textrm{known-length record} \\
@@ -303,6 +307,8 @@ $$\begin{array}{rrcll}
&&& \cd{val} \; \cd{rec} \; (x : \tau = e \; \cd{and})^+ & \textrm{mutually-recursive values} \\
\end{array}$$
+As with constructors, we include both abstraction and application for kind polymorphism, but applications are only inferred internally.
+
\emph{Declarations} primarily bring new symbols into context.
$$\begin{array}{rrcll}
\textrm{Declarations} & d &::=& \mt{con} \; x :: \kappa = c & \textrm{constructor synonym} \\
@@ -374,6 +380,7 @@ In this section, we give a declarative presentation of Ur's typing rules and rel
Since there is significant mutual recursion among the judgments, we introduce them all before beginning to give rules. We use the same variety of contexts throughout this section, implicitly introducing new sorts of context entries as needed.
\begin{itemize}
+\item $\Gamma \vdash \kappa$ expresses kind well-formedness.
\item $\Gamma \vdash c :: \kappa$ assigns a kind to a constructor in a context.
\item $\Gamma \vdash c \sim c$ proves the disjointness of two record constructors; that is, that they share no field names. We overload the judgment to apply to pairs of field names as well.
\item $\Gamma \vdash c \hookrightarrow C$ proves that record constructor $c$ decomposes into set $C$ of field names and record constructors.
@@ -388,8 +395,34 @@ Since there is significant mutual recursion among the judgments, we introduce th
\item $\mt{selfify}(M, \overline{s})$ adds information to signature items $\overline{s}$ to reflect the fact that we are concerned with the particular module $M$. This function is overloaded to work over individual signature items as well.
\end{itemize}
+
+\subsection{Kind Well-Formedness}
+
+$$\infer{\Gamma \vdash \mt{Type}}{}
+\quad \infer{\Gamma \vdash \mt{Unit}}{}
+\quad \infer{\Gamma \vdash \mt{Name}}{}
+\quad \infer{\Gamma \vdash \kappa_1 \to \kappa_2}{
+ \Gamma \vdash \kappa_1
+ & \Gamma \vdash \kappa_2
+}
+\quad \infer{\Gamma \vdash \{\kappa\}}{
+ \Gamma \vdash \kappa
+}
+\quad \infer{\Gamma \vdash (\kappa_1 \times \ldots \times \kappa_n)}{
+ \forall i: \Gamma \vdash \kappa_i
+}$$
+
+$$\infer{\Gamma \vdash X}{
+ X \in \Gamma
+}
+\quad \infer{\Gamma \vdash X \longrightarrow \kappa}{
+ \Gamma, X \vdash \kappa
+}$$
+
\subsection{Kinding}
+We write $[X \mapsto \kappa_1]\kappa_2$ for capture-avoiding substitution of $\kappa_1$ for $X$ in $\kappa_2$.
+
$$\infer{\Gamma \vdash (c) :: \kappa :: \kappa}{
\Gamma \vdash c :: \kappa
}
@@ -416,6 +449,9 @@ $$\infer{\Gamma \vdash \tau_1 \to \tau_2 :: \mt{Type}}{
\quad \infer{\Gamma \vdash x \; ? \: \kappa \to \tau :: \mt{Type}}{
\Gamma, x :: \kappa \vdash \tau :: \mt{Type}
}
+\quad \infer{\Gamma \vdash X \longrightarrow \tau :: \mt{Type}}{
+ \Gamma, X \vdash \tau :: \mt{Type}
+}
\quad \infer{\Gamma \vdash \$c :: \mt{Type}}{
\Gamma \vdash c :: \{\mt{Type}\}
}$$
@@ -428,6 +464,14 @@ $$\infer{\Gamma \vdash c_1 \; c_2 :: \kappa_2}{
\Gamma, x :: \kappa_1 \vdash c :: \kappa_2
}$$
+$$\infer{\Gamma \vdash c[\kappa'] :: [X \mapsto \kappa']\kappa}{
+ \Gamma \vdash c :: X \longrightarrow \kappa
+ & \Gamma \vdash \kappa'
+}
+\quad \infer{\Gamma \vdash X \Longrightarrow c :: X \longrightarrow \kappa}{
+ \Gamma, X \vdash c :: \kappa
+}$$
+
$$\infer{\Gamma \vdash () :: \mt{Unit}}{}
\quad \infer{\Gamma \vdash \#X :: \mt{Name}}{}$$
@@ -442,7 +486,7 @@ $$\infer{\Gamma \vdash [\overline{c_i = c'_i}] :: \{\kappa\}}{
& \Gamma \vdash c_1 \sim c_2
}$$
-$$\infer{\Gamma \vdash \mt{fold} :: ((\mt{Name} \to \kappa_1 \to \kappa_2 \to \kappa_2) \to \kappa_2 \to \{\kappa_1\} \to \kappa_2}{}$$
+$$\infer{\Gamma \vdash \mt{map} :: (\kappa_1 \to \kappa_2) \to \{\kappa_1\} \to \{\kappa_2\}}{}$$
$$\infer{\Gamma \vdash (\overline c) :: (\kappa_1 \times \ldots \times \kappa_n)}{
\forall i: \Gamma \vdash c_i :: \kappa_i
@@ -451,16 +495,14 @@ $$\infer{\Gamma \vdash (\overline c) :: (\kappa_1 \times \ldots \times \kappa_n)
\Gamma \vdash c :: (\kappa_1 \times \ldots \times \kappa_n)
}$$
-$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c :: \kappa}{
- \Gamma \vdash c_1 :: \{\kappa'\}
+$$\infer{\Gamma \vdash [c_1 \sim c_2] \Rightarrow \tau :: \mt{Type}}{
+ \Gamma \vdash c_1 :: \{\kappa\}
& \Gamma \vdash c_2 :: \{\kappa'\}
- & \Gamma, c_1 \sim c_2 \vdash c :: \kappa
+ & \Gamma, c_1 \sim c_2 \vdash \tau :: \mt{Type}
}$$
\subsection{Record Disjointness}
-We will use a keyword $\mt{map}$ as a shorthand, such that, for $f$ of kind $\kappa \to \kappa'$, $\mt{map} \; f$ stands for $\mt{fold} \; (\lambda (x_1 :: \mt{Name}) (x_2 :: \kappa) (x_3 :: \{\kappa'\}) \Rightarrow [x_1 = f \; x_2] \rc x_3) \; []$.
-
$$\infer{\Gamma \vdash c_1 \sim c_2}{
\Gamma \vdash c_1 \hookrightarrow C_1
& \Gamma \vdash c_2 \hookrightarrow C_2
@@ -494,7 +536,7 @@ $$\infer{\Gamma \vdash c \hookrightarrow \{c\}}{}
\subsection{\label{definitional}Definitional Equality}
-We use $\mathcal C$ to stand for a one-hole context that, when filled, yields a constructor. The notation $\mathcal C[c]$ plugs $c$ into $\mathcal C$. We omit the standard definition of one-hole contexts. We write $[x \mapsto c_1]c_2$ for capture-avoiding substitution of $c_1$ for $x$ in $c_2$.
+We use $\mathcal C$ to stand for a one-hole context that, when filled, yields a constructor. The notation $\mathcal C[c]$ plugs $c$ into $\mathcal C$. We omit the standard definition of one-hole contexts. We write $[x \mapsto c_1]c_2$ for capture-avoiding substitution of $c_1$ for $x$ in $c_2$, with analogous notation for substituting a kind in a constructor.
$$\infer{\Gamma \vdash c \equiv c}{}
\quad \infer{\Gamma \vdash c_1 \equiv c_2}{
@@ -518,21 +560,20 @@ $$\infer{\Gamma \vdash x \equiv c}{
\quad \infer{\Gamma \vdash (\overline c).i \equiv c_i}{}$$
$$\infer{\Gamma \vdash (\lambda x :: \kappa \Rightarrow c) \; c' \equiv [x \mapsto c'] c}{}
-\quad \infer{\Gamma \vdash c_1 \rc c_2 \equiv c_2 \rc c_1}{}
+\quad \infer{\Gamma \vdash (X \Longrightarrow c) [\kappa] \equiv [X \mapsto \kappa] c}{}$$
+
+$$\infer{\Gamma \vdash c_1 \rc c_2 \equiv c_2 \rc c_1}{}
\quad \infer{\Gamma \vdash c_1 \rc (c_2 \rc c_3) \equiv (c_1 \rc c_2) \rc c_3}{}$$
$$\infer{\Gamma \vdash [] \rc c \equiv c}{}
\quad \infer{\Gamma \vdash [\overline{c_1 = c'_1}] \rc [\overline{c_2 = c'_2}] \equiv [\overline{c_1 = c'_1}, \overline{c_2 = c'_2}]}{}$$
-$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow c \equiv c}{
- \Gamma \vdash c_1 \sim c_2
-}
-\quad \infer{\Gamma \vdash \mt{fold} \; f \; i \; [] \equiv i}{}
-\quad \infer{\Gamma \vdash \mt{fold} \; f \; i \; ([c_1 = c_2] \rc c) \equiv f \; c_1 \; c_2 \; (\mt{fold} \; f \; i \; c)}{}$$
+$$\infer{\Gamma \vdash \mt{map} \; f \; [] \equiv []}{}
+\quad \infer{\Gamma \vdash \mt{map} \; f \; ([c_1 = c_2] \rc c) \equiv [c_1 = f \; c_2] \rc \mt{map} \; f \; c}{}$$
$$\infer{\Gamma \vdash \mt{map} \; (\lambda x \Rightarrow x) \; c \equiv c}{}
-\quad \infer{\Gamma \vdash \mt{fold} \; f \; i \; (\mt{map} \; f' \; c)
- \equiv \mt{fold} \; (\lambda (x_1 :: \mt{Name}) (x_2 :: \kappa) \Rightarrow f \; x_1 \; (f' \; x_2)) \; i \; c}{}$$
+\quad \infer{\Gamma \vdash \mt{map} \; f \; (\mt{map} \; f' \; c)
+ \equiv \mt{map} \; (\lambda x \Rightarrow f \; (f' \; x)) \; c}{}$$
$$\infer{\Gamma \vdash \mt{map} \; f \; (c_1 \rc c_2) \equiv \mt{map} \; f \; c_1 \rc \mt{map} \; f \; c_2}{}$$
@@ -582,6 +623,14 @@ $$\infer{\Gamma \vdash e [c] : [x \mapsto c]\tau}{
\Gamma, x :: \kappa \vdash e : \tau
}$$
+$$\infer{\Gamma \vdash e [\kappa] : [X \mapsto \kappa]\tau}{
+ \Gamma \vdash e : X \longrightarrow \tau
+ & \Gamma \vdash \kappa
+}
+\quad \infer{\Gamma \vdash X \Longrightarrow e : X \longrightarrow \tau}{
+ \Gamma, X \vdash e : \tau
+}$$
+
$$\infer{\Gamma \vdash \{\overline{c = e}\} : \{\overline{c : \tau}\}}{
\forall i: \Gamma \vdash c_i :: \mt{Name}
& \Gamma \vdash e_i : \tau_i
@@ -603,13 +652,6 @@ $$\infer{\Gamma \vdash e \rcut c : \$c'}{
\Gamma \vdash e : \$(c \rc c')
}$$
-$$\infer{\Gamma \vdash \mt{fold} : \begin{array}{c}
- x_1 :: (\{\kappa\} \to \tau)
- \to (x_2 :: \mt{Name} \to x_3 :: \kappa \to x_4 :: \{\kappa\} \to \lambda [[x_2] \sim x_4]
- \Rightarrow x_1 \; x_4 \to x_1 \; ([x_2 = x_3] \rc x_4)) \\
- \to x_1 \; [] \to x_5 :: \{\kappa\} \to x_1 \; x_5
- \end{array}}{}$$
-
$$\infer{\Gamma \vdash \mt{let} \; \overline{ed} \; \mt{in} \; e \; \mt{end} : \tau}{
\Gamma \vdash \overline{ed} \leadsto \Gamma'
& \Gamma' \vdash e : \tau
@@ -621,7 +663,7 @@ $$\infer{\Gamma \vdash \mt{let} \; \overline{ed} \; \mt{in} \; e \; \mt{end} : \
$$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow e : \lambda [c_1 \sim c_2] \Rightarrow \tau}{
\Gamma \vdash c_1 :: \{\kappa\}
- & \Gamma \vdash c_2 :: \{\kappa\}
+ & \Gamma \vdash c_2 :: \{\kappa'\}
& \Gamma, c_1 \sim c_2 \vdash e : \tau
}$$
@@ -665,7 +707,7 @@ $$\infer{\Gamma \vdash \{\overline{x = p}\} \leadsto \Gamma_n; \{\overline{x = \
We use an auxiliary judgment $\overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'$, expressing the enrichment of $\Gamma$ with the types of the datatype constructors $\overline{dc}$, when they are known to belong to datatype $x$ with type parameters $\overline{y}$.
-This is the first judgment where we deal with type classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. Section \ref{typeclasses} gives an informal description of how type classes influence type inference.
+This is the first judgment where we deal with constructor classes, for the $\mt{class}$ declaration form. We will omit their special handling in this formal specification. Section \ref{typeclasses} gives an informal description of how constructor classes influence type inference.
We presuppose the existence of a function $\mathcal O$, where $\mathcal O(M, \overline{s})$ implements the $\mt{open}$ declaration by producing a context with the appropriate entry for each available component of module $M$ with signature items $\overline{s}$. Where possible, $\mathcal O$ uses ``transparent'' entries (e.g., an abstract type $M.x$ is mapped to $x :: \mt{Type} = M.x$), so that the relationship with $M$ is maintained. A related function $\mathcal O_c$ builds a context containing the disjointness constraints found in $\overline s$.
We write $\kappa_1^n \to \kappa$ as a shorthand, where $\kappa_1^0 \to \kappa = \kappa$ and $\kappa_1^{n+1} \to \kappa_2 = \kappa_1 \to (\kappa_1^n \to \kappa_2)$. We write $\mt{len}(\overline{y})$ for the length of vector $\overline{y}$ of variables.
@@ -732,8 +774,8 @@ $$\infer{\Gamma \vdash \mt{cookie} \; x : \tau \leadsto \Gamma, x : \mt{Basis}.\
\Gamma \vdash \tau :: \mt{Type}
}$$
-$$\infer{\Gamma \vdash \mt{class} \; x = c \leadsto \Gamma, x :: \mt{Type} \to \mt{Type} = c}{
- \Gamma \vdash c :: \mt{Type} \to \mt{Type}
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa \to \mt{Type} = c}{
+ \Gamma \vdash c :: \kappa \to \mt{Type}
}$$
$$\infer{\overline{y}; x; \Gamma \vdash \cdot \leadsto \Gamma}{}
@@ -789,10 +831,10 @@ $$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma, c_1 \sim
& \Gamma \vdash c_2 :: \{\kappa\}
}$$
-$$\infer{\Gamma \vdash \mt{class} \; x = c \leadsto \Gamma, x :: \mt{Type} \to \mt{Type} = c}{
- \Gamma \vdash c :: \mt{Type} \to \mt{Type}
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa \to \mt{Type} = c}{
+ \Gamma \vdash c :: \kappa \to \mt{Type}
}
-\quad \infer{\Gamma \vdash \mt{class} \; x \leadsto \Gamma, x :: \mt{Type} \to \mt{Type}}{}$$
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa \leadsto \Gamma, x :: \kappa \to \mt{Type}}{}$$
\subsection{Signature Compatibility}
@@ -852,13 +894,13 @@ $$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leq \mt{con} \
& \mt{proj}(M, \overline{s}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
}$$
-$$\infer{\Gamma \vdash \mt{class} \; x \leq \mt{con} \; x :: \mt{Type} \to \mt{Type}}{}
-\quad \infer{\Gamma \vdash \mt{class} \; x = c \leq \mt{con} \; x :: \mt{Type} \to \mt{Type}}{}$$
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa \leq \mt{con} \; x :: \kappa \to \mt{Type}}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leq \mt{con} \; x :: \kappa \to \mt{Type}}{}$$
$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa = c_1 \leq \mt{con} \; x :: \mt{\kappa} = c_2}{
\Gamma \vdash c_1 \equiv c_2
}
-\quad \infer{\Gamma \vdash \mt{class} \; x = c_1 \leq \mt{con} \; x :: \mt{Type} \to \mt{Type} = c_2}{
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c_1 \leq \mt{con} \; x :: \kappa \to \mt{Type} = c_2}{
\Gamma \vdash c_1 \equiv c_2
}$$
@@ -901,9 +943,9 @@ $$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leq \mt{constraint} \; c
& \Gamma \vdash c_2 \equiv c'_2
}$$
-$$\infer{\Gamma \vdash \mt{class} \; x \leq \mt{class} \; x}{}
-\quad \infer{\Gamma \vdash \mt{class} \; x = c \leq \mt{class} \; x}{}
-\quad \infer{\Gamma \vdash \mt{class} \; x = c_1 \leq \mt{class} \; x = c_2}{
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa \leq \mt{class} \; x :: \kappa}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leq \mt{class} \; x :: \kappa}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c_1 \leq \mt{class} \; x :: \kappa = c_2}{
\Gamma \vdash c_1 \equiv c_2
}$$
@@ -954,7 +996,7 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{sigOf}(\mt{table} \; x : c) &=& \mt{table} \; x : c \\
\mt{sigOf}(\mt{sequence} \; x) &=& \mt{sequence} \; x \\
\mt{sigOf}(\mt{cookie} \; x : \tau) &=& \mt{cookie} \; x : \tau \\
- \mt{sigOf}(\mt{class} \; x = c) &=& \mt{class} \; x = c \\
+ \mt{sigOf}(\mt{class} \; x :: \kappa = c) &=& \mt{class} \; x :: \kappa = c \\
\end{eqnarray*}
\begin{eqnarray*}
\mt{selfify}(M, \cdot) &=& \cdot \\
@@ -969,8 +1011,8 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{selfify}(M, \mt{signature} \; X = S) &=& \mt{signature} \; X = S \\
\mt{selfify}(M, \mt{include} \; S) &=& \mt{include} \; S \\
\mt{selfify}(M, \mt{constraint} \; c_1 \sim c_2) &=& \mt{constraint} \; c_1 \sim c_2 \\
- \mt{selfify}(M, \mt{class} \; x) &=& \mt{class} \; x = M.x \\
- \mt{selfify}(M, \mt{class} \; x = c) &=& \mt{class} \; x = c \\
+ \mt{selfify}(M, \mt{class} \; x :: \kappa) &=& \mt{class} \; x :: \kappa = M.x \\
+ \mt{selfify}(M, \mt{class} \; x :: \kappa = c) &=& \mt{class} \; x :: \kappa = c \\
\end{eqnarray*}
\subsection{Module Projection}
@@ -981,8 +1023,8 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, \mt{con} \; x) &=& \mt{Type}^{\mt{len}(\overline{y})} \to \mt{Type} \\
\mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, \mt{con} \; x) &=& (\mt{Type}^{\mt{len}(\overline{y})} \to \mt{Type}, M'.z) \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$} \\
&& \textrm{and $\mt{proj}(M', \overline{s'}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})$)} \\
- \mt{proj}(M, \mt{class} \; x \; \overline{s}, \mt{con} \; x) &=& \mt{Type} \to \mt{Type} \\
- \mt{proj}(M, \mt{class} \; x = c \; \overline{s}, \mt{con} \; x) &=& (\mt{Type} \to \mt{Type}, c) \\
+ \mt{proj}(M, \mt{class} \; x :: \kappa \; \overline{s}, \mt{con} \; x) &=& \kappa \to \mt{Type} \\
+ \mt{proj}(M, \mt{class} \; x :: \kappa = c \; \overline{s}, \mt{con} \; x) &=& (\kappa \to \mt{Type}, c) \\
\\
\mt{proj}(M, \mt{datatype} \; x \; \overline{y} = \overline{dc} \; \overline{s}, \mt{datatype} \; x) &=& (\overline{y}, \overline{dc}) \\
\mt{proj}(M, \mt{datatype} \; x = \mt{datatype} \; M'.z \; \overline{s}, \mt{con} \; x) &=& \mt{proj}(M', \overline{s'}, \mt{datatype} \; z) \textrm{ (where $\Gamma \vdash M' : \mt{sig} \; \overline{s'} \; \mt{end}$)} \\
@@ -1008,8 +1050,8 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{proj}(M, \mt{signature} \; X = S \; \overline{s}, V) &=& [X \mapsto M.X]\mt{proj}(M, \overline{s}, V) \\
\mt{proj}(M, \mt{include} \; S \; \overline{s}, V) &=& \mt{proj}(M, \overline{s'} \; \overline{s}, V) \textrm{ (where $\Gamma \vdash S \equiv \mt{sig} \; \overline{s'} \; \mt{end}$)} \\
\mt{proj}(M, \mt{constraint} \; c_1 \sim c_2 \; \overline{s}, V) &=& \mt{proj}(M, \overline{s}, V) \\
- \mt{proj}(M, \mt{class} \; x \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
- \mt{proj}(M, \mt{class} \; x = c \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{class} \; x :: \kappa \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
+ \mt{proj}(M, \mt{class} \; x :: \kappa = c \; \overline{s}, V) &=& [x \mapsto M.x]\mt{proj}(M, \overline{s}, V) \\
\end{eqnarray*}
--
cgit v1.2.3
From b2fb4c78b1713f2de24c7f476d462fcca9a27ddb Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 11:27:23 -0400
Subject: Revise type inference section
---
doc/manual.tex | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index d2a58042..b7925194 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1063,7 +1063,7 @@ The Ur/Web compiler uses \emph{heuristic type inference}, with no claims of comp
Type-checkers for languages based on the Hindley-Milner type discipline, like ML and Haskell, take advantage of \emph{principal typing} properties, making complete type inference relatively straightforward. Inference algorithms are traditionally implemented using type unification variables, at various points asserting equalities between types, in the process discovering the values of type variables. The Ur/Web compiler uses the same basic strategy, but the complexity of the type system rules out easy completeness.
-Type-checking can require evaluating recursive functional programs, thanks to the type-level $\mt{fold}$ operator. When a unification variable appears in such a type, the next step of computation can be undetermined. The value of that variable might be determined later, but this would be ``too late'' for the unification problems generated at the first occurrence. This is the essential source of incompleteness.
+Type-checking can require evaluating recursive functional programs, thanks to the type-level $\mt{map}$ operator. When a unification variable appears in such a type, the next step of computation can be undetermined. The value of that variable might be determined later, but this would be ``too late'' for the unification problems generated at the first occurrence. This is the essential source of incompleteness.
Nonetheless, the unification engine tends to do reasonably well. Unlike in ML, polymorphism is never inferred in definitions; it must be indicated explicitly by writing out constructor-level parameters. By writing these and other annotations, the programmer can generally get the type inference engine to do most of the type reconstruction work.
@@ -1071,23 +1071,23 @@ Nonetheless, the unification engine tends to do reasonably well. Unlike in ML,
The type inference engine tries to take advantage of the algebraic rules governing type-level records, as shown in Section \ref{definitional}. When two constructors of record kind are unified, they are reduced to normal forms, with like terms crossed off from each normal form until, hopefully, nothing remains. This cannot be complete, with the inclusion of unification variables. The type-checker can help you understand what goes wrong when the process fails, as it outputs the unmatched remainders of the two normal forms.
-\subsection{\label{typeclasses}Type Classes}
+\subsection{\label{typeclasses}Constructor Classes}
-Ur includes a type class facility inspired by Haskell's. The current version is very rudimentary, only supporting instances for particular types built up from abstract types and datatypes and type-level application.
+Ur includes a constructor class facility inspired by Haskell's. The current version is very rudimentary, only supporting instances for particular constructors built up from abstract constructors and datatypes and type-level application.
-Type classes are integrated with the module system. A type class is just a constructor of kind $\mt{Type} \to \mt{Type}$. By marking such a constructor $c$ as a type class, the programmer instructs the type inference engine to, in each scope, record all values of types $c \; \tau$ as \emph{instances}. Any function argument whose type is of such a form is treated as implicit, to be determined by examining the current instance database.
+Constructor classes are integrated with the module system. A constructor class of kind $\kappa$ is just a constructor of kind $\kappa \to \mt{Type}$. By marking such a constructor $c$ as a constructor class, the programmer instructs the type inference engine to, in each scope, record all values of types $c \; c'$ as \emph{instances}. Any function argument whose type is of such a form is treated as implicit, to be determined by examining the current instance database.
-The ``dictionary encoding'' often used in Haskell implementations is made explicit in Ur. Type class instances are just properly-typed values, and they can also be considered as ``proofs'' of membership in the class. In some cases, it is useful to pass these proofs around explicitly. An underscore written where a proof is expected will also be inferred, if possible, from the current instance database.
+The ``dictionary encoding'' often used in Haskell implementations is made explicit in Ur. Constructor class instances are just properly-typed values, and they can also be considered as ``proofs'' of membership in the class. In some cases, it is useful to pass these proofs around explicitly. An underscore written where a proof is expected will also be inferred, if possible, from the current instance database.
-Just as for types, type classes may be exported from modules, and they may be exported as concrete or abstract. Concrete type classes have their ``real'' definitions exposed, so that client code may add new instances freely. Abstract type classes are useful as ``predicates'' that can be used to enforce invariants, as we will see in some definitions of SQL syntax in the Ur/Web standard library.
+Just as for constructors, constructor classes may be exported from modules, and they may be exported as concrete or abstract. Concrete constructor classes have their ``real'' definitions exposed, so that client code may add new instances freely. Abstract constructor classes are useful as ``predicates'' that can be used to enforce invariants, as we will see in some definitions of SQL syntax in the Ur/Web standard library.
\subsection{Reverse-Engineering Record Types}
-It's useful to write Ur functions and functors that take record constructors as inputs, but these constructors can grow quite long, even though their values are often implied by other arguments. The compiler uses a simple heuristic to infer the values of unification variables that are folded over, yielding known results. Often, as in the case of $\mt{map}$-like folds, the base and recursive cases of a fold produce constructors with different top-level structure. Thus, if the result of the fold is known, examining its top-level structure reveals whether the record being folded over is empty or not. If it's empty, we're done; if it's not empty, we replace a single unification variable with a new constructor formed from three new unification variables, as in $[\alpha = \beta] \rc \gamma$. This process can often be repeated to determine a unification variable fully.
+It's useful to write Ur functions and functors that take record constructors as inputs, but these constructors can grow quite long, even though their values are often implied by other arguments. The compiler uses a simple heuristic to infer the values of unification variables that are mapped over, yielding known results. If the result is empty, we're done; if it's not empty, we replace a single unification variable with a new constructor formed from three new unification variables, as in $[\alpha = \beta] \rc \gamma$. This process can often be repeated to determine a unification variable fully.
\subsection{Implicit Arguments in Functor Applications}
-Constructor, constraint, and type class witness members of structures may be omitted, when those structures are used in contexts where their assigned signatures imply how to fill in those missing members. This feature combines well with reverse-engineering to allow for uses of complicated meta-programming functors with little more code than would be necessary to invoke an untyped, ad-hoc code generator.
+Constructor, constraint, and constructor class witness members of structures may be omitted, when those structures are used in contexts where their assigned signatures imply how to fill in those missing members. This feature combines well with reverse-engineering to allow for uses of complicated meta-programming functors with little more code than would be necessary to invoke an untyped, ad-hoc code generator.
\section{The Ur Standard Library}
@@ -1284,7 +1284,7 @@ $$\begin{array}{l}
\mt{val} \; \mt{sql\_mod} : \mt{sql\_binary} \; \mt{int} \; \mt{int} \; \mt{int}
\end{array}$$
-Finally, we have aggregate functions. The $\mt{COUNT(\ast)}$ syntax is handled specially, since it takes no real argument. The other aggregate functions are placed into a general type family, using type classes to restrict usage to properly-typed arguments. The key aspect of the $\mt{sql\_aggregate}$ function's type is the shift of aggregate-function-only fields into unrestricted fields.
+Finally, we have aggregate functions. The $\mt{COUNT(\ast)}$ syntax is handled specially, since it takes no real argument. The other aggregate functions are placed into a general type family, using constructor classes to restrict usage to properly-typed arguments. The key aspect of the $\mt{sql\_aggregate}$ function's type is the shift of aggregate-function-only fields into unrestricted fields.
$$\begin{array}{l}
\mt{val} \; \mt{sql\_count} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{int}
--
cgit v1.2.3
From daeb6356944c37fceb2325ab28e696200d7d0988 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 11:36:27 -0400
Subject: Describe folders
---
doc/manual.tex | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index b7925194..7a0a0002 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1110,6 +1110,22 @@ $$\begin{array}{l}
\mt{datatype} \; \mt{option} \; \mt{t} = \mt{None} \mid \mt{Some} \; \mt{of} \; \mt{t}
\end{array}$$
+Another important generic Ur element comes at the beginning of \texttt{top.urs}.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{folder} :: \mt{K} \longrightarrow \{\mt{K}\} \to \mt{Type} \\
+ \\
+ \mt{val} \; \mt{fold} : \mt{K} \longrightarrow \mt{tf} :: (\{\mt{K}\} \to \mt{Type}) \\
+ \hspace{.1in} \to (\mt{nm} :: \mt{Name} \to \mt{v} :: \mt{K} \to \mt{r} :: \{\mt{K}\} \to [[\mt{nm}] \sim \mt{r}] \Rightarrow \\
+ \hspace{.2in} \mt{tf} \; \mt{r} \to \mt{tf} \; ([\mt{nm} = \mt{v}] \rc \mt{r})) \\
+ \hspace{.1in} \to \mt{tf} \; [] \\
+ \hspace{.1in} \to \mt{r} :: \{\mt{K}\} \to \mt{folder} \; \mt{r} \to \mt{tf} \; \mt{r}
+\end{array}$$
+
+For a type-level record $\mt{r}$, a $\mt{folder} \; \mt{r}$ encodes a permutation of $\mt{r}$'s elements. The $\mt{fold}$ function can be called on a $\mt{folder}$ to iterate over the elements of $\mt{r}$ in that order. $\mt{fold}$ is parameterized on a type-level function to be used to calculate the type of each intermediate result of folding. After processing a subset $\mt{r'}$ of $\mt{r}$'s entries, the type of the accumulator should be $\mt{tf} \; \mt{r'}$. The next two expression arguments to $\mt{fold}$ are the usual step function and initial accumulator, familiar from fold functions over lists. The final two arguments are the record to fold over and a $\mt{folder}$ for it.
+
+The Ur compiler treates $\mt{folder}$ like a constructor class, using built-in rules to infer $\mt{folder}$s for records with known structure. The order in which field names are mentioned in source code is used as a hint about the permutation that the programmer would like.
+
\section{The Ur/Web Standard Library}
--
cgit v1.2.3
From a7ec41ffd3043f91b25996f8da34d7533394348d Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 11:56:10 -0400
Subject: Update old Ur/Web library section, before adding new stuff
---
doc/manual.tex | 34 +++++++++++++++++++++++-----------
lib/ur/basis.urs | 4 ++--
2 files changed, 25 insertions(+), 13 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 7a0a0002..fb056fd0 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1129,14 +1129,27 @@ The Ur compiler treates $\mt{folder}$ like a constructor class, using built-in r
\section{The Ur/Web Standard Library}
+\subsection{Monads}
+
+The Ur Basis defines the monad constructor class from Haskell.
+
+$$\begin{array}{l}
+ \mt{class} \; \mt{monad} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{return} : \mt{m} ::: (\mt{Type} \to \mt{Type}) \to \mt{t} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{monad} \; \mt{m} \\
+ \hspace{.1in} \to \mt{t} \to \mt{m} \; \mt{t} \\
+ \mt{val} \; \mt{bind} : \mt{m} ::: (\mt{Type} \to \mt{Type}) \to \mt{t1} ::: \mt{Type} \to \mt{t2} ::: \mt{Type} \\
+ \hspace{.1in} \to \mt{monad} \; \mt{m} \\
+ \hspace{.1in} \to \mt{m} \; \mt{t1} \to (\mt{t1} \to \mt{m} \; \mt{t2}) \\
+ \hspace{.1in} \to \mt{m} \; \mt{t2}
+\end{array}$$
+
\subsection{Transactions}
Ur is a pure language; we use Haskell's trick to support controlled side effects. The standard library defines a monad $\mt{transaction}$, meant to stand for actions that may be undone cleanly. By design, no other kinds of actions are supported.
$$\begin{array}{l}
\mt{con} \; \mt{transaction} :: \mt{Type} \to \mt{Type} \\
- \\
- \mt{val} \; \mt{return} : \mt{t} ::: \mt{Type} \to \mt{t} \to \mt{transaction} \; \mt{t} \\
- \mt{val} \; \mt{bind} : \mt{t_1} ::: \mt{Type} \to \mt{t_2} ::: \mt{Type} \to \mt{transaction} \; \mt{t_1} \to (\mt{t_1} \to \mt{transaction} \; \mt{t_2}) \to \mt{transaction} \; \mt{t_2}
+ \mt{val} \; \mt{transaction\_monad} : \mt{monad} \; \mt{transaction}
\end{array}$$
\subsection{HTTP}
@@ -1175,7 +1188,7 @@ $$\begin{array}{l}
Queries are used by folding over their results inside transactions.
$$\begin{array}{l}
\mt{val} \; \mt{query} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \lambda [\mt{tables} \sim \mt{exps}] \Rightarrow \mt{state} ::: \mt{Type} \to \mt{sql\_query} \; \mt{tables} \; \mt{exps} \\
- \hspace{.1in} \to (\$(\mt{exps} \rc \mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: \{\mt{Type}\}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \$\mt{fields}] \rc \mt{acc}) \; [] \; \mt{tables}) \\
+ \hspace{.1in} \to (\$(\mt{exps} \rc \mt{map} \; (\lambda \mt{fields} :: \{\mt{Type}\} \Rightarrow \$\mt{fields}) \; \mt{tables}) \\
\hspace{.2in} \to \mt{state} \to \mt{transaction} \; \mt{state}) \\
\hspace{.1in} \to \mt{state} \to \mt{transaction} \; \mt{state}
\end{array}$$
@@ -1203,12 +1216,12 @@ $$\begin{array}{l}
\hspace{.1in} \to \mt{grouped} ::: \{\{\mt{Type}\}\} \\
\hspace{.1in} \to \mt{selectedFields} ::: \{\{\mt{Type}\}\} \\
\hspace{.1in} \to \mt{selectedExps} ::: \{\mt{Type}\} \\
- \hspace{.1in} \to \{\mt{From} : \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: \{\mt{Type}\}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_table} \; \mt{fields}] \rc \mt{acc}) \; [] \; \mt{tables}), \\
+ \hspace{.1in} \to \{\mt{From} : \$(\mt{map} \; \mt{sql\_table} \; \mt{tables}), \\
\hspace{.2in} \mt{Where} : \mt{sql\_exp} \; \mt{tables} \; [] \; [] \; \mt{bool}, \\
\hspace{.2in} \mt{GroupBy} : \mt{sql\_subset} \; \mt{tables} \; \mt{grouped}, \\
\hspace{.2in} \mt{Having} : \mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; [] \; \mt{bool}, \\
\hspace{.2in} \mt{SelectFields} : \mt{sql\_subset} \; \mt{grouped} \; \mt{selectedFields}, \\
- \hspace{.2in} \mt {SelectExps} : \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{t} :: \mt{Type}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; [] \; \mt{t}] \rc \mt{acc}) \; [] \; \mt{selectedExps}) \} \\
+ \hspace{.2in} \mt {SelectExps} : \$(\mt{map} \; (\mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; []) \; \mt{selectedExps}) \} \\
\hspace{.1in} \to \mt{sql\_query1} \; \mt{tables} \; \mt{selectedFields} \; \mt{selectedExps}
\end{array}$$
@@ -1217,9 +1230,8 @@ $$\begin{array}{l}
\mt{con} \; \mt{sql\_subset} :: \{\{\mt{Type}\}\} \to \{\{\mt{Type}\}\} \to \mt{Type} \\
\mt{val} \; \mt{sql\_subset} : \mt{keep\_drop} :: \{(\{\mt{Type}\} \times \{\mt{Type}\})\} \\
\hspace{.1in} \to \mt{sql\_subset} \\
- \hspace{.2in} (\mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: (\{\mt{Type}\} \times \{\mt{Type}\})) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \; [\mt{fields}.1 \sim \mt{fields}.2] \Rightarrow \\
- \hspace{.3in} [\mt{nm} = \mt{fields}.1 \rc \mt{fields}.2] \rc \mt{acc}) \; [] \; \mt{keep\_drop}) \\
- \hspace{.2in} (\mt{fold} \; (\lambda \mt{nm} \; (\mt{fields} :: (\{\mt{Type}\} \times \{\mt{Type}\})) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{fields}.1] \rc \mt{acc}) \; [] \; \mt{keep\_drop}) \\
+ \hspace{.2in} (\mt{map} \; (\lambda \mt{fields} :: (\{\mt{Type}\} \times \{\mt{Type}\}) \Rightarrow \mt{fields}.1 \rc \mt{fields}.2)\; \mt{keep\_drop}) \\
+ \hspace{.2in} (\mt{map} \; (\lambda \mt{fields} :: (\{\mt{Type}\} \times \{\mt{Type}\}) \Rightarrow \mt{fields}.1) \; \mt{keep\_drop}) \\
\mt{val} \; \mt{sql\_subset\_all} : \mt{tables} :: \{\{\mt{Type}\}\} \to \mt{sql\_subset} \; \mt{tables} \; \mt{tables}
\end{array}$$
@@ -1363,13 +1375,13 @@ $$\begin{array}{l}
Properly-typed records may be used to form $\mt{INSERT}$ commands.
$$\begin{array}{l}
\mt{val} \; \mt{insert} : \mt{fields} ::: \{\mt{Type}\} \to \mt{sql\_table} \; \mt{fields} \\
- \hspace{.1in} \to \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{t} :: \mt{Type}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_exp} \; [] \; [] \; [] \; \mt{t}] \rc \mt{acc}) \; [] \; \mt{fields}) \to \mt{dml}
+ \hspace{.1in} \to \$(\mt{map} \; (\mt{sql\_exp} \; [] \; [] \; []) \; \mt{fields}) \to \mt{dml}
\end{array}$$
An $\mt{UPDATE}$ command is formed from a choice of which table fields to leave alone and which to change, along with an expression to use to compute the new value of each changed field and a $\mt{WHERE}$ clause.
$$\begin{array}{l}
\mt{val} \; \mt{update} : \mt{unchanged} ::: \{\mt{Type}\} \to \mt{changed} :: \{\mt{Type}\} \to \lambda [\mt{changed} \sim \mt{unchanged}] \\
- \hspace{.1in} \Rightarrow \$(\mt{fold} \; (\lambda \mt{nm} \; (\mt{t} :: \mt{Type}) \; \mt{acc} \; [[\mt{nm}] \sim \mt{acc}] \Rightarrow [\mt{nm} = \mt{sql\_exp} \; [\mt{T} = \mt{changed} \rc \mt{unchanged}] \; [] \; [] \; \mt{t}] \rc \mt{acc}) \; [] \; \mt{changed}) \\
+ \hspace{.1in} \Rightarrow \$(\mt{map} \; (\mt{sql\_exp} \; [\mt{T} = \mt{changed} \rc \mt{unchanged}] \; [] \; []) \; \mt{changed}) \\
\hspace{.1in} \to \mt{sql\_table} \; (\mt{changed} \rc \mt{unchanged}) \to \mt{sql\_exp} \; [\mt{T} = \mt{changed} \rc \mt{unchanged}] \; [] \; [] \; \mt{bool} \to \mt{dml}
\end{array}$$
diff --git a/lib/ur/basis.urs b/lib/ur/basis.urs
index c2a55168..e4bff8a9 100644
--- a/lib/ur/basis.urs
+++ b/lib/ur/basis.urs
@@ -128,12 +128,12 @@ val sql_query1 : tables ::: {{Type}}
-> grouped ::: {{Type}}
-> selectedFields ::: {{Type}}
-> selectedExps ::: {Type}
- -> {From : $(map (fn fields :: {Type} => sql_table fields) tables),
+ -> {From : $(map sql_table tables),
Where : sql_exp tables [] [] bool,
GroupBy : sql_subset tables grouped,
Having : sql_exp grouped tables [] bool,
SelectFields : sql_subset grouped selectedFields,
- SelectExps : $(map (fn (t :: Type) => sql_exp grouped tables [] t) selectedExps) }
+ SelectExps : $(map (sql_exp grouped tables []) selectedExps) }
-> sql_query1 tables selectedFields selectedExps
type sql_relop
--
cgit v1.2.3
From 4bdb9cdaf35e83082260e91556c35590028282d9 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 12:10:02 -0400
Subject: Functional-reactive subsection
---
doc/manual.tex | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index fb056fd0..b57953ea 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1449,6 +1449,34 @@ $$\begin{array}{l}
\end{array}$$
+\subsection{Functional-Reactive Client-Side Programming}
+
+Ur/Web supports running code on web browsers, via automatic compilation to JavaScript. Most approaches to this kind of coding involve imperative manipulation of the DOM tree representing an HTML document's structure. Ur/Web follows the \emph{functional-reactive} approach instead. Programs may allocate mutable \emph{sources} of arbitrary types, and an HTML page is effectively a pure function over the latest values of the sources. The page is not mutated directly, but rather it changes automatically as the sources are mutated.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{source} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{source} : \mt{t} ::: \mt{Type} \to \mt{t} \to \mt{transaction} \; (\mt{source} \; \mt{t}) \\
+ \mt{val} \; \mt{set} : \mt{t} ::: \mt{Type} \to \mt{source} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{get} : \mt{t} ::: \mt{Type} \to \mt{source} \; \mt{t} \to \mt{transaction} \; \mt{t}
+\end{array}$$
+
+Pure functions over sources are represented in a monad of \emph{signals}.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{signal} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{signal\_monad} : \mt{monad} \; \mt{signal} \\
+ \mt{val} \; \mt{signal} : \mt{t} ::: \mt{Type} \to \mt{source} \; \mt{t} \to \mt{signal} \; \mt{t}
+\end{array}$$
+
+A reactive portion of an HTML page is injected with a $\mt{dyn}$ tag, which has a signal-valued attribute $\mt{Signal}$.
+
+$$\begin{array}{l}
+ \mt{val} \; \mt{dyn} : \mt{ctx} ::: \{\mt{Unit}\} \to \mt{use} ::: \{\mt{Type}\} \to \mt{bind} ::: \{\mt{Type}\} \to \mt{unit} \\
+ \hspace{.1in} \to \mt{tag} \; [\mt{Signal} = \mt{signal} \; (\mt{xml} \; \mt{ctx} \; \mt{use} \; \mt{bind})] \; \mt{ctx} \; [] \; \mt{use} \; \mt{bind}
+\end{array}$$
+
+Transactions can be run on the client by including them in attributes like the $\mt{OnClick}$ attribute of $\mt{button}$, and GUI widgets like $\mt{ctextbox}$ have $\mt{Source}$ attributes that can be used to connect them to sources, so that their values can be read by code running because of, e.g., an $\mt{OnClick}$ event.
+
\section{Ur/Web Syntax Extensions}
Ur/Web features some syntactic shorthands for building values using the functions from the last section. This section sketches the grammar of those extensions. We write spans of syntax inside brackets to indicate that they are optional.
--
cgit v1.2.3
From 8947397e8e7133f0f284211f0e12f662728c1a35 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 12:18:11 -0400
Subject: Describe AJAX RPC structure
---
doc/manual.tex | 2 ++
1 file changed, 2 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index b57953ea..72360bb8 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1573,6 +1573,8 @@ HTML forms are handled in a similar way. The $\mt{action}$ attribute of a $\mt{
For both links and actions, direct arguments and local variables mentioned implicitly via closures are automatically included in serialized form in URLs, in the order in which they appear in the source code.
+Ur/Web programs generally mix server- and client-side code in a fairly transparent way. The one important restriction is that mixed client-server code must encapsulate all server-side pieces within named functions. This is because execution of such pieces will be implemented by explicit calls to the remote web server, and it is useful to get the programmer's help in designing the interface to be used. For example, this makes it easier to allow a client running an old version of an application to continue interacting with a server that has been upgraded to a new version, if the programmer took care to keep the interfaces of all of the old remote calls the same. The functions implementing these services are assigned names in the same way as normal web entry points, by using module structure.
+
\section{Compiler Phases}
--
cgit v1.2.3
From 64c858689d78c4ed5a83363207faca5ecb2cbb91 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 12:23:23 -0400
Subject: Update compiler phases
---
doc/manual.tex | 8 ++++++++
1 file changed, 8 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 72360bb8..0038d3b1 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1610,6 +1610,14 @@ Remove unnecessary mutual recursion, splitting recursive groups into strongly-co
Remove all definitions not needed to run the page handlers that are visible in the signature of the last module listed in the \texttt{.urp} file.
+\subsection{Rpcify}
+
+Pieces of code are determined to be client-side, server-side, neither, or both, by figuring out which standard library functions might be needed to execute them. Calls to server-side functions (e.g., $\mt{query}$) within mixed client-server code are identified and replaced with explicit remote calls. Some mixed functions may be converted to continuation-passing style to facilitate this transformation.
+
+\subsection{Untangle, Shake}
+
+Repeat these simplifications.
+
\subsection{\label{tag}Tag}
Assign a URL name to each link and form action. It is important that these links and actions are written as applications of named functions, because such names are used to generate URL patterns. A URL pattern has a name built from the full module path of the named function, followed by the function name, with all pieces separated by slashes. The path of a functor application is based on the name given to the result, rather than the path of the functor itself.
--
cgit v1.2.3
From c826168d7e72d3b5454ef581b61faaf11ff3be68 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 12:25:05 -0400
Subject: Add guard elim rule
---
doc/manual.tex | 4 ++++
1 file changed, 4 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 0038d3b1..abce0c44 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -665,6 +665,10 @@ $$\infer{\Gamma \vdash \lambda [c_1 \sim c_2] \Rightarrow e : \lambda [c_1 \sim
\Gamma \vdash c_1 :: \{\kappa\}
& \Gamma \vdash c_2 :: \{\kappa'\}
& \Gamma, c_1 \sim c_2 \vdash e : \tau
+}
+\quad \infer{\Gamma \vdash e \; ! : \tau}{
+ \Gamma \vdash e : [c_1 \sim c_2] \Rightarrow \tau
+ & \Gamma \vdash c_1 \sim c_2
}$$
\subsection{Pattern Typing}
--
cgit v1.2.3
From e3fe4307964fd93ddac266cf125ff2264fde2656 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 12 Mar 2009 12:34:39 -0400
Subject: Spell-check manual
---
doc/manual.tex | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index abce0c44..d52927ea 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1128,7 +1128,7 @@ $$\begin{array}{l}
For a type-level record $\mt{r}$, a $\mt{folder} \; \mt{r}$ encodes a permutation of $\mt{r}$'s elements. The $\mt{fold}$ function can be called on a $\mt{folder}$ to iterate over the elements of $\mt{r}$ in that order. $\mt{fold}$ is parameterized on a type-level function to be used to calculate the type of each intermediate result of folding. After processing a subset $\mt{r'}$ of $\mt{r}$'s entries, the type of the accumulator should be $\mt{tf} \; \mt{r'}$. The next two expression arguments to $\mt{fold}$ are the usual step function and initial accumulator, familiar from fold functions over lists. The final two arguments are the record to fold over and a $\mt{folder}$ for it.
-The Ur compiler treates $\mt{folder}$ like a constructor class, using built-in rules to infer $\mt{folder}$s for records with known structure. The order in which field names are mentioned in source code is used as a hint about the permutation that the programmer would like.
+The Ur compiler treats $\mt{folder}$ like a constructor class, using built-in rules to infer $\mt{folder}$s for records with known structure. The order in which field names are mentioned in source code is used as a hint about the permutation that the programmer would like.
\section{The Ur/Web Standard Library}
@@ -1647,6 +1647,7 @@ Here the compiler repeats the earlier Shake phase.
Programs are translated to a new intermediate language without polymorphism or non-$\mt{Type}$ constructors. Error messages may pop up here if earlier phases failed to remove such features.
This is the stage at which concrete names are generated for cookies, tables, and sequences. They are named following the same convention as for links and actions, based on module path information saved from earlier stages. Table and sequence names separate path elements with underscores instead of slashes, and they are prefixed by \texttt{uw\_}.
+
\subsection{MonoOpt}
Simple algebraic laws are applied to simplify the program, focusing especially on efficient imperative generation of HTML pages.
--
cgit v1.2.3
From 50aed147862bf5fb1b94528d254013f259f6eba8 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 5 Apr 2009 12:37:38 -0400
Subject: Update the manual
---
doc/manual.tex | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++-----
lib/ur/basis.urs | 1 -
2 files changed, 52 insertions(+), 6 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index d52927ea..b5eb191d 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -129,6 +129,9 @@ For each entry \texttt{M} in the module list, the file \texttt{M.urs} is include
A few other named directives are supported. \texttt{prefix PREFIX} sets the prefix included before every URI within the generated application; the default is \texttt{/}. \texttt{exe FILENAME} sets the filename to which to write the output executable; the default for file \texttt{P.urp} is \texttt{P.exe}. \texttt{debug} saves some intermediate C files, which is mostly useful to help in debugging the compiler itself. \texttt{profile} generates an executable that may be used with gprof.
+\texttt{timeout N} sets to \texttt{N} seconds the amount of time that the generated server will wait after the last contact from a client before determining that that client has exited the application. Clients that remain active will take the timeout setting into account in determining how often to ping the server, so it only makes sense to set a high timeout to cope with browser and network delays and failures. Higher timeouts can lead to more unnecessary client information taking up memory on the server. The timeout goes unused by any page that doesn't involve the \texttt{recv} function, since the server only needs to store per-client information for clients that receive asynchronous messages.
+
+
\subsection{Building an Application}
To compile project \texttt{P.urp}, simply run
@@ -1453,9 +1456,30 @@ $$\begin{array}{l}
\end{array}$$
-\subsection{Functional-Reactive Client-Side Programming}
+\subsection{Client-Side Programming}
+
+Ur/Web supports running code on web browsers, via automatic compilation to JavaScript.
+
+\subsubsection{The Basics}
+
+Clients can open alert dialog boxes, in the usual annoying JavaScript way.
+$$\begin{array}{l}
+ \mt{val} \; \mt{alert} : \mt{string} \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
+Any transaction may be run in a new thread with the $\mt{spawn}$ function.
+$$\begin{array}{l}
+ \mt{val} \; \mt{spawn} : \mt{transaction} \; \mt{unit} \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
+The current thread can be paused for at least a specified number of milliseconds.
+$$\begin{array}{l}
+ \mt{val} \; \mt{sleep} : \mt{int} \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
+\subsubsection{Functional-Reactive Page Generation}
-Ur/Web supports running code on web browsers, via automatic compilation to JavaScript. Most approaches to this kind of coding involve imperative manipulation of the DOM tree representing an HTML document's structure. Ur/Web follows the \emph{functional-reactive} approach instead. Programs may allocate mutable \emph{sources} of arbitrary types, and an HTML page is effectively a pure function over the latest values of the sources. The page is not mutated directly, but rather it changes automatically as the sources are mutated.
+Most approaches to ``AJAX''-style coding involve imperative manipulation of the DOM tree representing an HTML document's structure. Ur/Web follows the \emph{functional-reactive} approach instead. Programs may allocate mutable \emph{sources} of arbitrary types, and an HTML page is effectively a pure function over the latest values of the sources. The page is not mutated directly, but rather it changes automatically as the sources are mutated.
$$\begin{array}{l}
\mt{con} \; \mt{source} :: \mt{Type} \to \mt{Type} \\
@@ -1475,11 +1499,34 @@ $$\begin{array}{l}
A reactive portion of an HTML page is injected with a $\mt{dyn}$ tag, which has a signal-valued attribute $\mt{Signal}$.
$$\begin{array}{l}
- \mt{val} \; \mt{dyn} : \mt{ctx} ::: \{\mt{Unit}\} \to \mt{use} ::: \{\mt{Type}\} \to \mt{bind} ::: \{\mt{Type}\} \to \mt{unit} \\
- \hspace{.1in} \to \mt{tag} \; [\mt{Signal} = \mt{signal} \; (\mt{xml} \; \mt{ctx} \; \mt{use} \; \mt{bind})] \; \mt{ctx} \; [] \; \mt{use} \; \mt{bind}
+ \mt{val} \; \mt{dyn} : \mt{use} ::: \{\mt{Type}\} \to \mt{bind} ::: \{\mt{Type}\} \to \mt{unit} \\
+ \hspace{.1in} \to \mt{tag} \; [\mt{Signal} = \mt{signal} \; (\mt{xml} \; \mt{body} \; \mt{use} \; \mt{bind})] \; \mt{body} \; [] \; \mt{use} \; \mt{bind}
+\end{array}$$
+
+Transactions can be run on the client by including them in attributes like the $\mt{Onclick}$ attribute of $\mt{button}$, and GUI widgets like $\mt{ctextbox}$ have $\mt{Source}$ attributes that can be used to connect them to sources, so that their values can be read by code running because of, e.g., an $\mt{Onclick}$ event.
+
+\subsubsection{Asynchronous Message-Passing}
+
+To support asynchronous, ``server push'' delivery of messages to clients, any client that might need to receive an asynchronous message is assigned a unique ID. These IDs may be retrieved both on the client and on the server, during execution of code related to a client.
+
+$$\begin{array}{l}
+ \mt{type} \; \mt{client} \\
+ \mt{val} \; \mt{self} : \mt{transaction} \; \mt{client}
\end{array}$$
-Transactions can be run on the client by including them in attributes like the $\mt{OnClick}$ attribute of $\mt{button}$, and GUI widgets like $\mt{ctextbox}$ have $\mt{Source}$ attributes that can be used to connect them to sources, so that their values can be read by code running because of, e.g., an $\mt{OnClick}$ event.
+\emph{Channels} are the means of message-passing. Each channel is created in the context of a client and belongs to that client; no other client may receive the channel's messages. Each channel type includes the type of values that may be sent over the channel. Sending and receiving are asynchronous, in the sense that a client need not be ready to receive a message right away. Rather, sent messages may queue up, waiting to be processed.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{channel} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{channel} : \mt{t} ::: \mt{Type} \to \mt{transaction} \; (\mt{channel} \; \mt{t}) \\
+ \mt{val} \; \mt{send} : \mt{t} ::: \mt{Type} \to \mt{channel} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{recv} : \mt{t} ::: \mt{Type} \to \mt{channel} \; \mt{t} \to \mt{transaction} \; \mt{t}
+\end{array}$$
+
+The $\mt{channel}$ and $\mt{send}$ operations may only be executed on the server, and $\mt{recv}$ may only be executed on a client. Neither clients nor channels may be passed as arguments from clients to server-side functions, so persistent channels can only be maintained by storing them in the database and looking them up using the current client ID or some application-specific value as a key.
+
+Clients and channels live only as long as the web browser page views that they are associated with. When a user surfs away, his client and its channels will be garbage-collected, after that user is not heard from for the timeout period. Garbage collection deletes any database row that contains a client or channel directly. Any reference to one of these types inside an $\mt{option}$ is set to $\mt{None}$ instead. Both kinds of handling have the flavor of weak pointers, and that is a useful way to think about clients and channels in the database.
+
\section{Ur/Web Syntax Extensions}
diff --git a/lib/ur/basis.urs b/lib/ur/basis.urs
index 0d5d3d71..1cbca61d 100644
--- a/lib/ur/basis.urs
+++ b/lib/ur/basis.urs
@@ -113,7 +113,6 @@ val sleep : int -> transaction unit
con channel :: Type -> Type
val channel : t ::: Type -> transaction (channel t)
-val subscribe : t ::: Type -> channel t -> transaction unit
val send : t ::: Type -> channel t -> t -> transaction unit
val recv : t ::: Type -> channel t -> transaction t
--
cgit v1.2.3
From b250dac52673020574c2814081699a9a2ee9608b Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 11:45:03 -0400
Subject: Revising manual through end of Section 3
---
doc/manual.tex | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index b5eb191d..3f549c5b 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -56,10 +56,10 @@ make
sudo make install
\end{verbatim}
-Some other packages must be installed for the above to work. At a minimum, you need a standard UNIX shell, with standard UNIX tools like sed and GCC in your execution path; and MLton, the whole-program optimizing compiler for Standard ML. To build programs that access SQL databases, you also need libpq, the PostgreSQL client library. As of this writing, in the ``testing'' version of Debian Linux, this command will install the more uncommon of these dependencies:
+Some other packages must be installed for the above to work. At a minimum, you need a standard UNIX shell, with standard UNIX tools like sed and GCC in your execution path; MLton, the whole-program optimizing compiler for Standard ML; and the mhash C library. To build programs that access SQL databases, you also need libpq, the PostgreSQL client library. As of this writing, in the ``testing'' version of Debian Linux, this command will install the more uncommon of these dependencies:
\begin{verbatim}
-apt-get install mlton libpq-dev
+apt-get install mlton libmhash-dev libpq-dev
\end{verbatim}
It is also possible to access the modules of the Ur/Web compiler interactively, within Standard ML of New Jersey. To install the prerequisites in Debian testing:
@@ -127,9 +127,27 @@ A blank line always separates the named directives from a list of modules to inc
For each entry \texttt{M} in the module list, the file \texttt{M.urs} is included in the project if it exists, and the file \texttt{M.ur} must exist and is always included.
-A few other named directives are supported. \texttt{prefix PREFIX} sets the prefix included before every URI within the generated application; the default is \texttt{/}. \texttt{exe FILENAME} sets the filename to which to write the output executable; the default for file \texttt{P.urp} is \texttt{P.exe}. \texttt{debug} saves some intermediate C files, which is mostly useful to help in debugging the compiler itself. \texttt{profile} generates an executable that may be used with gprof.
-
-\texttt{timeout N} sets to \texttt{N} seconds the amount of time that the generated server will wait after the last contact from a client before determining that that client has exited the application. Clients that remain active will take the timeout setting into account in determining how often to ping the server, so it only makes sense to set a high timeout to cope with browser and network delays and failures. Higher timeouts can lead to more unnecessary client information taking up memory on the server. The timeout goes unused by any page that doesn't involve the \texttt{recv} function, since the server only needs to store per-client information for clients that receive asynchronous messages.
+Here is the complete list of directive forms. ``FFI'' stands for ``foreign function interface,'' Ur's facility for interaction between Ur programs and C and JavaScript libraries.
+\begin{itemize}
+\item \texttt{[allow|deny] [url|mime] PATTERN} registers a rule governing which URLs or MIME types are allowed in this application. The first such rule to match a URL or MIME type determines the verdict. If \texttt{PATTERN} ends in \texttt{*}, it is interpreted as a prefix rule. Otherwise, a string must match it exactly.
+\item \texttt{clientOnly Module.ident} registers an FFI function or transaction that may only be run in client browsers.
+\item \texttt{clientToServer Module.ident} adds FFI type \texttt{Module.ident} to the list of types that are OK to marshal from clients to servers. Values like XML trees and SQL queries are hard to marshal without introducing expensive validity checks, so it's easier to ensure that the server never trusts clients to send such values. The file \texttt{include/urweb.h} shows examples of the C support functions that are required of any type that may be marshalled. These include \texttt{attrify}, \texttt{urlify}, and \texttt{unurlify} functions.
+\item \texttt{database DBSTRING} sets the string to pass to libpq to open a database connection.
+\item \texttt{debug} saves some intermediate C files, which is mostly useful to help in debugging the compiler itself.
+\item \texttt{effectful Module.ident} registers an FFI function or transaction as having side effects. The optimizer avoids removing, moving, or duplicating calls to such functions. Every effectful FFI function must be registered, or the optimizer may make invalid transformations.
+\item \texttt{exe FILENAME} sets the filename to which to write the output executable. The default for file \texttt{P.urp} is \texttt{P.exe}.
+\item \texttt{ffi FILENAME} reads the file \texttt{FILENAME.urs} to determine the interface to a new FFI module. The name of the module is calculated from \texttt{FILENAME} in the same way as for normal source files. See the files \texttt{include/urweb.h} and \texttt{src/c/urweb.c} for examples of C headers and implementations for FFI modules. In general, every type or value \texttt{Module.ident} becomes \texttt{uw\_Module\_ident} in C.
+\item \texttt{jsFunc Module.ident=name} gives the JavaScript name of an FFI value.
+\item \texttt{library FILENAME} parses \texttt{FILENAME.urp} and merges its contents with the rest of the current file's contents.
+\item \texttt{link FILENAME} adds \texttt{FILENAME} to the list of files to be passed to the GCC linker at the end of compilation. This is most useful for importing extra libraries needed by new FFI modules.
+\item \texttt{prefix PREFIX} sets the prefix included before every URI within the generated application. The default is \texttt{/}.
+\item \texttt{profile} generates an executable that may be used with gprof.
+\item \texttt{rewrite KIND FROM TO} gives a rule for rewriting canonical module paths. For instance, the canonical path of a page may be \texttt{Mod1.Mod2.mypage}, while you would rather the page were accessed via a URL containing only \texttt{page}. The directive \texttt{rewrite url Mod1/Mod2/mypage page} would accomplish that. The possible values of \texttt{KIND} determine which kinds of objects are affected. The kind \texttt{all} matches any object, and \texttt{url} matches page URLs. The kinds \texttt{table}, \texttt{sequence}, and \texttt{view} match those sorts of SQL entities, and \texttt{relation} matches any of those three. \texttt{cookie} matches HTTP cookies, and \texttt{style} matches CSS class names. If \texttt{FROM} ends in \texttt{/*}, it is interpreted as a prefix matching rule, and rewriting occurs by replacing only the appropriate prefix of a path with \texttt{TO}. While the actual external names of relations and styles have parts separated by underscores instead of slashes, all rewrite rules must be written in terms of slashes.
+\item \texttt{script URL} adds \texttt{URL} to the list of extra JavaScript files to be included at the beginning of any page that uses JavaScript. This is most useful for importing JavaScript versions of functions found in new FFI modules.
+\item \texttt{serverOnly Module.ident} registers an FFI function or transaction that may only be run on the server.
+\item \texttt{sql FILENAME} sets where to write an SQL file with the commands to create the expected database schema. The default is not to create such a file.
+\item \texttt{timeout N} sets to \texttt{N} seconds the amount of time that the generated server will wait after the last contact from a client before determining that that client has exited the application. Clients that remain active will take the timeout setting into account in determining how often to ping the server, so it only makes sense to set a high timeout to cope with browser and network delays and failures. Higher timeouts can lead to more unnecessary client information taking up memory on the server. The timeout goes unused by any page that doesn't involve the \texttt{recv} function, since the server only needs to store per-client information for clients that receive asynchronous messages.
+\end{itemize}
\subsection{Building an Application}
--
cgit v1.2.3
From 3840a47b19607d0cf8136bbdaa9b688a63328819 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 11:59:50 -0400
Subject: Revising manual through end of Section 6
---
doc/manual.tex | 43 ++++++++++++++++++++++++++++---------------
1 file changed, 28 insertions(+), 15 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 3f549c5b..368e3024 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -166,7 +166,7 @@ urweb -timing P
\section{Ur Syntax}
-In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for tables, sequences, and cookies.
+In this section, we describe the syntax of Ur, deferring to a later section discussion of most of the syntax specific to SQL and XML. The sole exceptions are the declaration forms for relations, cookies, and styles.
\subsection{Lexical Conventions}
@@ -343,8 +343,10 @@ $$\begin{array}{rrcll}
&&& \mt{constraint} \; c \sim c & \textrm{record disjointness constraint} \\
&&& \mt{open} \; \mt{constraints} \; M & \textrm{inclusion of just the constraints from a module} \\
&&& \mt{table} \; x : c & \textrm{SQL table} \\
+ &&& \mt{view} \; x : c & \textrm{SQL view} \\
&&& \mt{sequence} \; x & \textrm{SQL sequence} \\
&&& \mt{cookie} \; x : \tau & \textrm{HTTP cookie} \\
+ &&& \mt{style} \; x : \tau & \textrm{CSS class} \\
&&& \mt{class} \; x :: \kappa = c & \textrm{concrete constructor class} \\
\\
\textrm{Modules} & M &::=& \mt{struct} \; d^* \; \mt{end} & \textrm{constant} \\
@@ -356,6 +358,8 @@ $$\begin{array}{rrcll}
There are two kinds of Ur files. A file named $M\texttt{.ur}$ is an \emph{implementation file}, and it should contain a sequence of declarations $d^*$. A file named $M\texttt{.urs}$ is an \emph{interface file}; it must always have a matching $M\texttt{.ur}$ and should contain a sequence of signature items $s^*$. When both files are present, the overall effect is the same as a monolithic declaration $\mt{structure} \; M : \mt{sig} \; s^* \; \mt{end} = \mt{struct} \; d^* \; \mt{end}$. When no interface file is included, the overall effect is similar, with a signature for module $M$ being inferred rather than just checked against an interface.
+We omit some extra possibilities in $\mt{table}$ syntax, deferring them to Section \ref{tables}.
+
\subsection{Shorthands}
There are a variety of derived syntactic forms that elaborate into the core syntax from the last subsection. We will present the additional forms roughly following the order in which we presented the constructs that they elaborate into.
@@ -392,7 +396,7 @@ The syntax $\mt{if} \; e \; \mt{then} \; e_1 \; \mt{else} \; e_2$ expands to $\m
There are infix operator syntaxes for a number of functions defined in the $\mt{Basis}$ module. There is $=$ for $\mt{eq}$, $\neq$ for $\mt{neq}$, $-$ for $\mt{neg}$ (as a prefix operator) and $\mt{minus}$, $+$ for $\mt{plus}$, $\times$ for $\mt{times}$, $/$ for $\mt{div}$, $\%$ for $\mt{mod}$, $<$ for $\mt{lt}$, $\leq$ for $\mt{le}$, $>$ for $\mt{gt}$, and $\geq$ for $\mt{ge}$.
-A signature item $\mt{table} \; x : c$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{sql\_table} \; c$. $\mt{sequence} \; x$ is short for $\mt{val} \; x : \mt{Basis}.\mt{sql\_sequence}$, and $\mt{cookie} \; x : \tau$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{http\_cookie} \; \tau$.
+A signature item $\mt{table} \; x : c$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{sql\_table} \; c \; []$. $\mt{view} \; x : c$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{sql\_view} \; c$, and $\mt{sequence} \; x$ is short for $\mt{val} \; x : \mt{Basis}.\mt{sql\_sequence}$. $\mt{cookie} \; x : \tau$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{http\_cookie} \; \tau$, and $\mt{style} \; x$ is shorthand for $\mt{val} \; x : \mt{Basis}.\mt{css\_class}$.
\section{Static Semantics}
@@ -790,17 +794,22 @@ $$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma}{
\Gamma \vdash M : \mt{sig} \; \overline{s} \; \mt{end}
}$$
-$$\infer{\Gamma \vdash \mt{table} \; x : c \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_table} \; c}{
+$$\infer{\Gamma \vdash \mt{table} \; x : c \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_table} \; c \; []}{
\Gamma \vdash c :: \{\mt{Type}\}
}
-\quad \infer{\Gamma \vdash \mt{sequence} \; x \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_sequence}}{}$$
+\quad \infer{\Gamma \vdash \mt{view} \; x : c \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_view} \; c}{
+ \Gamma \vdash c :: \{\mt{Type}\}
+}$$
+
+$$\infer{\Gamma \vdash \mt{sequence} \; x \leadsto \Gamma, x : \mt{Basis}.\mt{sql\_sequence}}{}$$
$$\infer{\Gamma \vdash \mt{cookie} \; x : \tau \leadsto \Gamma, x : \mt{Basis}.\mt{http\_cookie} \; \tau}{
\Gamma \vdash \tau :: \mt{Type}
-}$$
+}
+\quad \infer{\Gamma \vdash \mt{style} \; x \leadsto \Gamma, x : \mt{Basis}.\mt{css\_class}}{}$$
-$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa \to \mt{Type} = c}{
- \Gamma \vdash c :: \kappa \to \mt{Type}
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa = c}{
+ \Gamma \vdash c :: \kappa
}$$
$$\infer{\overline{y}; x; \Gamma \vdash \cdot \leadsto \Gamma}{}
@@ -856,10 +865,10 @@ $$\infer{\Gamma \vdash \mt{constraint} \; c_1 \sim c_2 \leadsto \Gamma, c_1 \sim
& \Gamma \vdash c_2 :: \{\kappa\}
}$$
-$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa \to \mt{Type} = c}{
- \Gamma \vdash c :: \kappa \to \mt{Type}
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leadsto \Gamma, x :: \kappa = c}{
+ \Gamma \vdash c :: \kappa
}
-\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa \leadsto \Gamma, x :: \kappa \to \mt{Type}}{}$$
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa \leadsto \Gamma, x :: \kappa}{}$$
\subsection{Signature Compatibility}
@@ -919,13 +928,13 @@ $$\infer{\Gamma \vdash \mt{datatype} \; x = \mt{datatype} \; M.z \leq \mt{con} \
& \mt{proj}(M, \overline{s}, \mt{datatype} \; z) = (\overline{y}, \overline{dc})
}$$
-$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa \leq \mt{con} \; x :: \kappa \to \mt{Type}}{}
-\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leq \mt{con} \; x :: \kappa \to \mt{Type}}{}$$
+$$\infer{\Gamma \vdash \mt{class} \; x :: \kappa \leq \mt{con} \; x :: \kappa}{}
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c \leq \mt{con} \; x :: \kappa}{}$$
$$\infer{\Gamma \vdash \mt{con} \; x :: \kappa = c_1 \leq \mt{con} \; x :: \mt{\kappa} = c_2}{
\Gamma \vdash c_1 \equiv c_2
}
-\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c_1 \leq \mt{con} \; x :: \kappa \to \mt{Type} = c_2}{
+\quad \infer{\Gamma \vdash \mt{class} \; x :: \kappa = c_1 \leq \mt{con} \; x :: \kappa = c_2}{
\Gamma \vdash c_1 \equiv c_2
}$$
@@ -1019,8 +1028,10 @@ $$\infer{\Gamma \vdash M_1(M_2) : [X \mapsto M_2]S_2}{
\mt{sigOf}(\mt{constraint} \; c_1 \sim c_2) &=& \mt{constraint} \; c_1 \sim c_2 \\
\mt{sigOf}(\mt{open} \; \mt{constraints} \; M) &=& \cdot \\
\mt{sigOf}(\mt{table} \; x : c) &=& \mt{table} \; x : c \\
+ \mt{sigOf}(\mt{view} \; x : c) &=& \mt{view} \; x : c \\
\mt{sigOf}(\mt{sequence} \; x) &=& \mt{sequence} \; x \\
\mt{sigOf}(\mt{cookie} \; x : \tau) &=& \mt{cookie} \; x : \tau \\
+ \mt{sigOf}(\mt{style} \; x) &=& \mt{style} \; x \\
\mt{sigOf}(\mt{class} \; x :: \kappa = c) &=& \mt{class} \; x :: \kappa = c \\
\end{eqnarray*}
\begin{eqnarray*}
@@ -1098,9 +1109,9 @@ The type inference engine tries to take advantage of the algebraic rules governi
\subsection{\label{typeclasses}Constructor Classes}
-Ur includes a constructor class facility inspired by Haskell's. The current version is very rudimentary, only supporting instances for particular constructors built up from abstract constructors and datatypes and type-level application.
+Ur includes a constructor class facility inspired by Haskell's. The current version is experimental, with very general Prolog-like facilities that can lead to compile-time non-termination.
-Constructor classes are integrated with the module system. A constructor class of kind $\kappa$ is just a constructor of kind $\kappa \to \mt{Type}$. By marking such a constructor $c$ as a constructor class, the programmer instructs the type inference engine to, in each scope, record all values of types $c \; c'$ as \emph{instances}. Any function argument whose type is of such a form is treated as implicit, to be determined by examining the current instance database.
+Constructor classes are integrated with the module system. A constructor class of kind $\kappa$ is just a constructor of kind $\kappa$. By marking such a constructor $c$ as a constructor class, the programmer instructs the type inference engine to, in each scope, record all values of types $c \; c_1 \; \ldots \; c_n$ as \emph{instances}. Any function argument whose type is of such a form is treated as implicit, to be determined by examining the current instance database.
The ``dictionary encoding'' often used in Haskell implementations is made explicit in Ur. Constructor class instances are just properly-typed values, and they can also be considered as ``proofs'' of membership in the class. In some cases, it is useful to pass these proofs around explicitly. An underscore written where a proof is expected will also be inferred, if possible, from the current instance database.
@@ -1195,6 +1206,8 @@ $$\begin{array}{l}
\mt{con} \; \mt{sql\_table} :: \{\mt{Type}\} \to \mt{Type}
\end{array}$$
+\subsubsection{\label{tables}Tables}
+
\subsubsection{Queries}
A final query is constructed via the $\mt{sql\_query}$ function. Constructor arguments respectively specify the table fields we select (as records mapping tables to the subsets of their fields that we choose) and the (always named) extra expressions that we select.
--
cgit v1.2.3
From 4a557d7e2d43055e0958e6d655cc72c38dc3787d Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 12:49:16 -0400
Subject: Table constraint Ur code
---
doc/manual.tex | 110 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 107 insertions(+), 3 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 368e3024..34972002 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1138,14 +1138,19 @@ $$\begin{array}{l}
\mt{type} \; \mt{float} \\
\mt{type} \; \mt{string} \\
\mt{type} \; \mt{time} \\
+ \mt{type} \; \mt{blob} \\
\\
\mt{type} \; \mt{unit} = \{\} \\
\\
\mt{datatype} \; \mt{bool} = \mt{False} \mid \mt{True} \\
\\
- \mt{datatype} \; \mt{option} \; \mt{t} = \mt{None} \mid \mt{Some} \; \mt{of} \; \mt{t}
+ \mt{datatype} \; \mt{option} \; \mt{t} = \mt{None} \mid \mt{Some} \; \mt{of} \; \mt{t} \\
+ \\
+ \mt{datatype} \; \mt{list} \; \mt{t} = \mt{Nil} \mid \mt{Cons} \; \mt{of} \; \mt{t} \times \mt{list} \; \mt{t}
\end{array}$$
+The only unusual element of this list is the $\mt{blob}$ type, which stands for binary sequences.
+
Another important generic Ur element comes at the beginning of \texttt{top.urs}.
$$\begin{array}{l}
@@ -1203,10 +1208,109 @@ $$\begin{array}{l}
The fundamental unit of interest in the embedding of SQL is tables, described by a type family and creatable only via the $\mt{table}$ declaration form.
$$\begin{array}{l}
- \mt{con} \; \mt{sql\_table} :: \{\mt{Type}\} \to \mt{Type}
+ \mt{con} \; \mt{sql\_table} :: \{\mt{Type}\} \to \{\{\mt{Unit}\}\} \to \mt{Type}
+\end{array}$$
+The first argument to this constructor gives the names and types of a table's columns, and the second argument gives the set of valid keys. Keys are the only subsets of the columns that may be referenced as foreign keys. Each key has a name.
+
+We also have the simpler type family of SQL views, which have no keys.
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_view} :: \{\mt{Type}\} \to \mt{Type}
\end{array}$$
-\subsubsection{\label{tables}Tables}
+A multi-parameter type class is used to allow tables and views to be used interchangeably, with a way of extracting the set of columns from each.
+$$\begin{array}{l}
+ \mt{class} \; \mt{fieldsOf} :: \mt{Type} \to \{\mt{Type}\} \to \mt{Type} \\
+ \mt{val} \; \mt{fieldsOf\_table} : \mt{fs} ::: \{\mt{Type}\} \to \mt{keys} ::: \{\{\mt{Unit}\}\} \to \mt{fieldsOf} \; (\mt{sql\_table} \; \mt{fs} \; \mt{keys}) \; \mt{fs} \\
+ \mt{val} \; \mt{fieldsOf\_view} : \mt{fs} ::: \{\mt{Type}\} \to \mt{fieldsOf} \; (\mt{sql\_view} \; \mt{fs}) \; \mt{fs}
+\end{array}$$
+
+\subsubsection{Table Constraints}
+
+Tables may be declared with constraints, such that database modifications that violate the constraints are blocked. A table may have at most one \texttt{PRIMARY KEY} constraint, which gives the subset of columns that will most often be used to look up individual rows in the table.
+
+$$\begin{array}{l}
+ \mt{con} \; \mt{primary\_key} :: \{\mt{Type}\} \to \{\{\mt{Unit}\}\} \to \mt{Type} \\
+ \mt{val} \; \mt{no\_primary\_key} : \mt{fs} ::: \{\mt{Type}\} \to \mt{primary\_key} \; \mt{fs} \; [] \\
+ \mt{val} \; \mt{primary\_key} : \mt{rest} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \to \mt{key1} :: \mt{Name} \to \mt{keys} :: \{\mt{Type}\} \\
+ \hspace{.1in} \to [[\mt{key1}] \sim \mt{keys}] \Rightarrow [[\mt{key1} = \mt{t}] \rc \mt{keys} \sim \mt{rest}] \\
+ \hspace{.1in} \Rightarrow \$([\mt{key1} = \mt{sql\_injectable\_prim} \; \mt{t}] \rc \mt{map} \; \mt{sql\_injectable\_prim} \; \mt{keys}) \\
+ \hspace{.1in} \to \mt{primary\_key} \; ([\mt{key1} = \mt{t}] \rc \mt{keys} \rc \mt{rest}) \; [\mt{Pkey} = [\mt{key1}] \rc \mt{map} \; (\lambda \_ \Rightarrow ()) \; \mt{keys}]
+\end{array}$$
+The type class $\mt{sql\_injectable\_prim}$ characterizes which types are allowed in SQL and are not $\mt{option}$ types. In SQL, a \texttt{PRIMARY KEY} constraint enforces after-the-fact that a column may not contain \texttt{NULL}s, but Ur/Web forces that information to be included in table types from the beginning. Thus, the only effect of this kind of constraint in Ur/Web is to enforce uniqueness of the given key within the table.
+
+A type family stands for sets of named constraints of the remaining varieties.
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_constraints} :: \{\mt{Type}\} \to \{\{\mt{Unit}\}\} \to \mt{Type}
+\end{array}$$
+The first argument gives the column types of the table being constrained, and the second argument maps constraint names to the keys that they define. Constraints that don't define keys are mapped to ``empty keys.''
+
+There is a type family of individual, unnamed constraints.
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_constraint} :: \{\mt{Type}\} \to \{\mt{Unit}\} \to \mt{Type}
+\end{array}$$
+The first argument is the same as above, and the second argument gives the key columns for just this constraint.
+
+We have operations for assembling constraints into constraint sets.
+$$\begin{array}{l}
+ \mt{val} \; \mt{no\_constraint} : \mt{fs} ::: \{\mt{Type}\} \to \mt{sql\_constraints} \; \mt{fs} \; [] \\
+ \mt{val} \; \mt{one\_constraint} : \mt{fs} ::: \{\mt{Type}\} \to \mt{unique} ::: \{\mt{Unit}\} \to \mt{name} :: \mt{Name} \\
+ \hspace{.1in} \to \mt{sql\_constraint} \; \mt{fs} \; \mt{unique} \to \mt{sql\_constraints} \; \mt{fs} \; [\mt{name} = \mt{unique}] \\
+ \mt{val} \; \mt{join\_constraints} : \mt{fs} ::: \{\mt{Type}\} \to \mt{uniques1} ::: \{\{\mt{Unit}\}\} \to \mt{uniques2} ::: \{\{\mt{Unit}\}\} \to [\mt{uniques1} \sim \mt{uniques2}] \\
+ \hspace{.1in} \Rightarrow \mt{sql\_constraints} \; \mt{fs} \; \mt{uniques1} \to \mt{sql\_constraints} \; \mt{fs} \; \mt{uniques2} \to \mt{sql\_constraints} \; \mt{fs} \; (\mt{uniques1} \rc \mt{uniques2})
+\end{array}$$
+
+A \texttt{UNIQUE} constraint forces a set of columns to be a key, which means that no combination of column values may occur more than once in the table. The $\mt{unique1}$ and $\mt{unique}$ arguments are separated out only to ensure that empty \texttt{UNIQUE} constraints are rejected.
+$$\begin{array}{l}
+ \mt{val} \; \mt{unique} : \mt{rest} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \to \mt{unique1} :: \mt{Name} \to \mt{unique} :: \{\mt{Type}\} \\
+ \hspace{.1in} \to [[\mt{unique1}] \sim \mt{unique}] \Rightarrow [[\mt{unique1} = \mt{t}] \rc \mt{unique} \sim \mt{rest}] \\
+ \hspace{.1in} \Rightarrow \mt{sql\_constraint} \; ([\mt{unique1} = \mt{t}] \rc \mt{unique} \rc \mt{rest}) \; ([\mt{unique1}] \rc \mt{map} \; (\lambda \_ \Rightarrow ()) \; \mt{unique})
+\end{array}$$
+
+A \texttt{FOREIGN KEY} constraint connects a set of local columns to a local or remote key, enforcing that the local columns always reference an existing row of the foreign key's table. A local column of type $\mt{t}$ may be linked to a foreign column of type $\mt{option} \; \mt{t}$, and vice versa. We formalize that notion with a type class.
+$$\begin{array}{l}
+ \mt{class} \; \mt{linkable} :: \mt{Type} \to \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{linkable\_same} : \mt{t} ::: \mt{Type} \to \mt{linkable} \; \mt{t} \; \mt{t} \\
+ \mt{val} \; \mt{linkable\_from\_nullable} : \mt{t} ::: \mt{Type} \to \mt{linkable} \; (\mt{option} \; \mt{t}) \; \mt{t} \\
+ \mt{val} \; \mt{linkable\_to\_nullable} : \mt{t} ::: \mt{Type} \to \mt{linkable} \; \mt{t} \; (\mt{option} \; \mt{t})
+\end{array}$$
+
+The $\mt{matching}$ type family uses $\mt{linkable}$ to define when two keys match up type-wise.
+$$\begin{array}{l}
+ \mt{con} \; \mt{matching} :: \{\mt{Type}\} \to \{\mt{Type}\} \to \mt{Type} \\
+ \mt{val} \; \mt{mat\_nil} : \mt{matching} \; [] \; [] \\
+ \mt{val} \; \mt{mat\_cons} : \mt{t1} ::: \mt{Type} \to \mt{rest1} ::: \{\mt{Type}\} \to \mt{t2} ::: \mt{Type} \to \mt{rest2} ::: \{\mt{Type}\} \to \mt{nm1} :: \mt{Name} \to \mt{nm2} :: \mt{Name} \\
+ \hspace{.1in} \to [[\mt{nm1}] \sim \mt{rest1}] \Rightarrow [[\mt{nm2}] \sim \mt{rest2}] \Rightarrow \mt{linkable} \; \mt{t1} \; \mt{t2} \to \mt{matching} \; \mt{rest1} \; \mt{rest2} \\
+ \hspace{.1in} \to \mt{matching} \; ([\mt{nm1} = \mt{t1}] \rc \mt{rest1}) \; ([\mt{nm2} = \mt{t2}] \rc \mt{rest2})
+\end{array}$$
+
+SQL provides a number of different propagation modes for \texttt{FOREIGN KEY} constraints, governing what happens when a row containing a still-referenced foreign key value is deleted or modified to have a different key value. The argument of a propagation mode's type gives the local key type.
+$$\begin{array}{l}
+ \mt{con} \; \mt{propagation\_mode} :: \{\mt{Type}\} \to \mt{Type} \\
+ \mt{val} \; \mt{restrict} : \mt{fs} ::: \{\mt{Type}\} \to \mt{propagation\_mode} \; \mt{fs} \\
+ \mt{val} \; \mt{cascade} : \mt{fs} ::: \{\mt{Type}\} \to \mt{propagation\_mode} \; \mt{fs} \\
+ \mt{val} \; \mt{no\_action} : \mt{fs} ::: \{\mt{Type}\} \to \mt{propagation\_mode} \; \mt{fs} \\
+ \mt{val} \; \mt{set\_null} : \mt{fs} ::: \{\mt{Type}\} \to \mt{propagation\_mode} \; (\mt{map} \; \mt{option} \; \mt{fs})
+\end{array}$$
+
+Finally, we put these ingredients together to define the \texttt{FOREIGN KEY} constraint function.
+$$\begin{array}{l}
+ \mt{val} \; \mt{foreign\_key} : \mt{mine1} ::: \mt{Name} \to \mt{t} ::: \mt{Type} \to \mt{mine} ::: \{\mt{Type}\} \to \mt{munused} ::: \{\mt{Type}\} \to \mt{foreign} ::: \{\mt{Type}\} \\
+ \hspace{.1in} \to \mt{funused} ::: \{\mt{Type}\} \to \mt{nm} ::: \mt{Name} \to \mt{uniques} ::: \{\{\mt{Unit}\}\} \\
+ \hspace{.1in} \to [[\mt{mine1}] \sim \mt{mine}] \Rightarrow [[\mt{mine1} = \mt{t}] \rc \mt{mine} \sim \mt{munused}] \Rightarrow [\mt{foreign} \sim \mt{funused}] \Rightarrow [[\mt{nm}] \sim \mt{uniques}] \\
+ \hspace{.1in} \Rightarrow \mt{matching} \; ([\mt{mine1} = \mt{t}] \rc \mt{mine}) \; \mt{foreign} \\
+ \hspace{.1in} \to \mt{sql\_table} \; (\mt{foreign} \rc \mt{funused}) \; ([\mt{nm} = \mt{map} \; (\lambda \_ \Rightarrow ()) \; \mt{foreign}] \rc \mt{uniques}) \\
+ \hspace{.1in} \to \{\mt{OnDelete} : \mt{propagation\_mode} \; ([\mt{mine1} = \mt{t}] \rc \mt{mine}), \\
+ \hspace{.2in} \mt{OnUpdate} : \mt{propagation\_mode} \; ([\mt{mine1} = \mt{t}] \rc \mt{mine})\} \\
+ \hspace{.1in} \to \mt{sql\_constraint} \; ([\mt{mine1} = \mt{t}] \rc \mt{mine} \rc \mt{munused}) \; []
+\end{array}$$
+
+The last kind of constraint is a \texttt{CHECK} constraint, which attaches a boolean invariant over a row's contents. It is defined using the $\mt{sql\_exp}$ type family, which we discuss in more detail below.
+$$\begin{array}{l}
+ \mt{val} \; \mt{check} : \mt{fs} ::: \{\mt{Type}\} \to \mt{sql\_exp} \; [] \; [] \; \mt{fs} \; \mt{bool} \to \mt{sql\_constraint} \; \mt{fs} \; []
+\end{array}$$
+
+Section \ref{tables} shows the expanded syntax of the $\mt{table}$ declaration and signature item that includes constraints. There is no other way to use constraints with SQL in Ur/Web.
+
\subsubsection{Queries}
--
cgit v1.2.3
From fa9cab290144d669460ddbf20eb7dc079421f143 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 13:21:26 -0400
Subject: Revised query types
---
doc/manual.tex | 92 +++++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 75 insertions(+), 17 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 34972002..7ecbfafd 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1197,11 +1197,35 @@ $$\begin{array}{l}
There are transactions for reading an HTTP header by name and for getting and setting strongly-typed cookies. Cookies may only be created by the $\mt{cookie}$ declaration form, ensuring that they be named consistently based on module structure.
$$\begin{array}{l}
-\mt{val} \; \mt{requestHeader} : \mt{string} \to \mt{transaction} \; (\mt{option} \; \mt{string}) \\
-\\
-\mt{con} \; \mt{http\_cookie} :: \mt{Type} \to \mt{Type} \\
-\mt{val} \; \mt{getCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{transaction} \; (\mt{option} \; \mt{t}) \\
-\mt{val} \; \mt{setCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit}
+ \mt{val} \; \mt{requestHeader} : \mt{string} \to \mt{transaction} \; (\mt{option} \; \mt{string}) \\
+ \\
+ \mt{con} \; \mt{http\_cookie} :: \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{getCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{transaction} \; (\mt{option} \; \mt{t}) \\
+ \mt{val} \; \mt{setCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
+There are also an abstract $\mt{url}$ type and functions for converting to it, based on the policy defined by \texttt{[allow|deny] url} directives in the project file.
+$$\begin{array}{l}
+ \mt{type} \; \mt{url} \\
+ \mt{val} \; \mt{bless} : \mt{string} \to \mt{url} \\
+ \mt{val} \; \mt{checkUrl} : \mt{string} \to \mt{option} \; \mt{url}
+\end{array}$$
+$\mt{bless}$ raises a runtime error if the string passed to it fails the URL policy.
+
+It's possible for pages to return files of arbitrary MIME types. A file can be input from the user using this data type, along with the $\mt{upload}$ form tag.
+$$\begin{array}{l}
+ \mt{type} \; \mt{file} \\
+ \mt{val} \; \mt{fileName} : \mt{file} \to \mt{option} \; \mt{string} \\
+ \mt{val} \; \mt{fileMimeType} : \mt{file} \to \mt{string} \\
+ \mt{val} \; \mt{fileData} : \mt{file} \to \mt{blob}
+\end{array}$$
+
+A blob can be extracted from a file and returned as the page result. There are bless and check functions for MIME types analogous to those for URLs.
+$$\begin{array}{l}
+ \mt{type} \; \mt{mimeType} \\
+ \mt{val} \; \mt{blessMime} : \mt{string} \to \mt{mimeType} \\
+ \mt{val} \; \mt{checkMime} : \mt{string} \to \mt{option} \; \mt{mimeType} \\
+ \mt{val} \; \mt{returnBlob} : \mt{t} ::: \mt{Type} \to \mt{blob} \to \mt{mimeType} \to \mt{transaction} \; \mt{t}
\end{array}$$
\subsection{SQL}
@@ -1358,7 +1382,7 @@ $$\begin{array}{l}
\hspace{.1in} \to \mt{grouped} ::: \{\{\mt{Type}\}\} \\
\hspace{.1in} \to \mt{selectedFields} ::: \{\{\mt{Type}\}\} \\
\hspace{.1in} \to \mt{selectedExps} ::: \{\mt{Type}\} \\
- \hspace{.1in} \to \{\mt{From} : \$(\mt{map} \; \mt{sql\_table} \; \mt{tables}), \\
+ \hspace{.1in} \to \{\mt{From} : \mt{sql\_from\_items} \; \mt{tables}, \\
\hspace{.2in} \mt{Where} : \mt{sql\_exp} \; \mt{tables} \; [] \; [] \; \mt{bool}, \\
\hspace{.2in} \mt{GroupBy} : \mt{sql\_subset} \; \mt{tables} \; \mt{grouped}, \\
\hspace{.2in} \mt{Having} : \mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; [] \; \mt{bool}, \\
@@ -1399,17 +1423,20 @@ $$\begin{array}{l}
Ur values of appropriate types may be injected into SQL expressions.
$$\begin{array}{l}
+ \mt{class} \; \mt{sql\_injectable\_prim} \\
+ \mt{val} \; \mt{sql\_bool} : \mt{sql\_injectable\_prim} \; \mt{bool} \\
+ \mt{val} \; \mt{sql\_int} : \mt{sql\_injectable\_prim} \; \mt{int} \\
+ \mt{val} \; \mt{sql\_float} : \mt{sql\_injectable\_prim} \; \mt{float} \\
+ \mt{val} \; \mt{sql\_string} : \mt{sql\_injectable\_prim} \; \mt{string} \\
+ \mt{val} \; \mt{sql\_time} : \mt{sql\_injectable\_prim} \; \mt{time} \\
+ \mt{val} \; \mt{sql\_blob} : \mt{sql\_injectable\_prim} \; \mt{blob} \\
+ \mt{val} \; \mt{sql\_channel} : \mt{t} ::: \mt{Type} \to \mt{sql\_injectable\_prim} \; (\mt{channel} \; \mt{t}) \\
+ \mt{val} \; \mt{sql\_client} : \mt{sql\_injectable\_prim} \; \mt{client} \\
+ \\
\mt{class} \; \mt{sql\_injectable} \\
- \mt{val} \; \mt{sql\_bool} : \mt{sql\_injectable} \; \mt{bool} \\
- \mt{val} \; \mt{sql\_int} : \mt{sql\_injectable} \; \mt{int} \\
- \mt{val} \; \mt{sql\_float} : \mt{sql\_injectable} \; \mt{float} \\
- \mt{val} \; \mt{sql\_string} : \mt{sql\_injectable} \; \mt{string} \\
- \mt{val} \; \mt{sql\_time} : \mt{sql\_injectable} \; \mt{time} \\
- \mt{val} \; \mt{sql\_option\_bool} : \mt{sql\_injectable} \; (\mt{option} \; \mt{bool}) \\
- \mt{val} \; \mt{sql\_option\_int} : \mt{sql\_injectable} \; (\mt{option} \; \mt{int}) \\
- \mt{val} \; \mt{sql\_option\_float} : \mt{sql\_injectable} \; (\mt{option} \; \mt{float}) \\
- \mt{val} \; \mt{sql\_option\_string} : \mt{sql\_injectable} \; (\mt{option} \; \mt{string}) \\
- \mt{val} \; \mt{sql\_option\_time} : \mt{sql\_injectable} \; (\mt{option} \; \mt{time}) \\
+ \mt{val} \; \mt{sql\_prim} : \mt{t} ::: \mt{Type} \to \mt{sql\_injectable\_prim} \; \mt{t} \to \mt{sql\_injectable} \; \mt{t} \\
+ \mt{val} \; \mt{sql\_option\_prim} : \mt{t} ::: \mt{Type} \to \mt{sql\_injectable\_prim} \; \mt{t} \to \mt{sql\_injectable} \; (\mt{option} \; \mt{t}) \\
+ \\
\mt{val} \; \mt{sql\_inject} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{t} ::: \mt{Type} \to \mt{sql\_injectable} \; \mt{t} \\
\hspace{.1in} \to \mt{t} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{t}
\end{array}$$
@@ -1455,7 +1482,6 @@ $$\begin{array}{l}
\end{array}$$
Finally, we have aggregate functions. The $\mt{COUNT(\ast)}$ syntax is handled specially, since it takes no real argument. The other aggregate functions are placed into a general type family, using constructor classes to restrict usage to properly-typed arguments. The key aspect of the $\mt{sql\_aggregate}$ function's type is the shift of aggregate-function-only fields into unrestricted fields.
-
$$\begin{array}{l}
\mt{val} \; \mt{sql\_count} : \mt{tables} ::: \{\{\mt{Type}\}\} \to \mt{agg} ::: \{\{\mt{Type}\}\} \to \mt{exps} ::: \{\mt{Type}\} \to \mt{sql\_exp} \; \mt{tables} \; \mt{agg} \; \mt{exps} \; \mt{int}
\end{array}$$
@@ -1484,6 +1510,36 @@ $$\begin{array}{l}
\mt{val} \; \mt{sql\_min} : \mt{t} ::: \mt{Type} \to \mt{sql\_maxable} \; \mt{t} \to \mt{sql\_aggregate} \; \mt{t}
\end{array}$$
+\texttt{FROM} clauses are specified using a type family.
+$$\begin{array}{l}
+ \mt{con} \; \mt{sql\_from\_items} :: \{\{\mt{Type}\}\} \to \mt{Type} \\
+ \mt{val} \; \mt{sql\_from\_table} : \mt{t} ::: \mt{Type} \to \mt{fs} ::: \{\mt{Type}\} \to \mt{fieldsOf} \; \mt{t} \; \mt{fs} \to \mt{name} :: \mt{Name} \to \mt{t} \to \mt{sql\_from\_items} \; [\mt{name} = \mt{fs}] \\
+ \mt{val} \; \mt{sql\_from\_comma} : \mt{tabs1} ::: \{\{\mt{Type}\}\} \to \mt{tabs2} ::: \{\{\mt{Type}\}\} \to [\mt{tabs1} \sim \mt{tabs2}] \\
+ \hspace{.1in} \Rightarrow \mt{sql\_from\_items} \; \mt{tabs1} \to \mt{sql\_from\_items} \; \mt{tabs2} \\
+ \hspace{.1in} \to \mt{sql\_from\_items} \; (\mt{tabs1} \rc \mt{tabs2}) \\
+ \mt{val} \; \mt{sql\_inner\_join} : \mt{tabs1} ::: \{\{\mt{Type}\}\} \to \mt{tabs2} ::: \{\{\mt{Type}\}\} \to [\mt{tabs1} \sim \mt{tabs2}] \\
+ \hspace{.1in} \Rightarrow \mt{sql\_from\_items} \; \mt{tabs1} \to \mt{sql\_from\_items} \; \mt{tabs2} \\
+ \hspace{.1in} \to \mt{sql\_exp} \; (\mt{tabs1} \rc \mt{tabs2}) \; [] \; [] \; \mt{bool} \\
+ \hspace{.1in} \to \mt{sql\_from\_items} \; (\mt{tabs1} \rc \mt{tabs2})
+\end{array}$$
+
+Besides these basic cases, outer joins are supported, which requires a type class for turning non-$\mt{option}$ columns into $\mt{option}$ columns.
+$$\begin{array}{l}
+ \mt{class} \; \mt{nullify} :: \mt{Type} \to \mt{Type} \to \mt{Type} \\
+ \mt{val} \; \mt{nullify\_option} : \mt{t} ::: \mt{Type} \to \mt{nullify} \; (\mt{option} \; \mt{t}) \; (\mt{option} \; \mt{t}) \\
+ \mt{val} \; \mt{nullify\_prim} : \mt{t} ::: \mt{Type} \to \mt{sql\_injectable\_prim} \; \mt{t} \to \mt{nullify} \; \mt{t} \; (\mt{option} \; \mt{t})
+\end{array}$$
+
+Left, right, and full outer joins can now be expressed using functions that accept records of $\mt{nullify}$ instances. Here, we give only the type for a left join as an example.
+
+$$\begin{array}{l}
+ \mt{val} \; \mt{sql\_left\_join} : \mt{tabs1} ::: \{\{\mt{Type}\}\} \to \mt{tabs2} ::: \{\{(\mt{Type} \times \mt{Type})\}\} \to [\mt{tabs1} \sim \mt{tabs2}] \\
+ \hspace{.1in} \Rightarrow \$(\mt{map} \; (\lambda \mt{r} \Rightarrow \$(\mt{map} \; (\lambda \mt{p} :: (\mt{Type} \times \mt{Type}) \Rightarrow \mt{nullify} \; \mt{p}.1 \; \mt{p}.2) \; \mt{r})) \; \mt{tabs2}) \\
+ \hspace{.1in} \to \mt{sql\_from\_items} \; \mt{tabs1} \to \mt{sql\_from\_items} \; (\mt{map} \; (\mt{map} \; (\lambda \mt{p} :: (\mt{Type} \times \mt{Type}) \Rightarrow \mt{p}.1)) \; \mt{tabs2}) \\
+ \hspace{.1in} \to \mt{sql\_exp} \; (\mt{tabs1} \rc \mt{map} \; (\mt{map} \; (\lambda \mt{p} :: (\mt{Type} \times \mt{Type}) \Rightarrow \mt{p}.1)) \; \mt{tabs2}) \; [] \; [] \; \mt{bool} \\
+ \hspace{.1in} \to \mt{sql\_from\_items} \; (\mt{tabs1} \rc \mt{map} \; (\mt{map} \; (\lambda \mt{p} :: (\mt{Type} \times \mt{Type}) \Rightarrow \mt{p}.2)) \; \mt{tabs2})
+\end{array}$$
+
We wrap up the definition of query syntax with the types used in representing $\mt{ORDER} \; \mt{BY}$, $\mt{LIMIT}$, and $\mt{OFFSET}$ clauses.
$$\begin{array}{l}
\mt{type} \; \mt{sql\_direction} \\
@@ -1669,6 +1725,8 @@ Ur/Web features some syntactic shorthands for building values using the function
\subsection{SQL}
+\subsubsection{\label{tables}Table Declarations}
+
\subsubsection{Queries}
Queries $Q$ are added to the rules for expressions $e$.
--
cgit v1.2.3
From 2daada8672b843c596d110d556cb4d8b136dea85 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 14:15:55 -0400
Subject: on* handlers
---
doc/manual.tex | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 7ecbfafd..9acbc1c5 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1621,10 +1621,13 @@ There is a function for producing an XML tree with a particular tag at its root.
$$\begin{array}{l}
\mt{val} \; \mt{tag} : \mt{attrsGiven} ::: \{\mt{Type}\} \to \mt{attrsAbsent} ::: \{\mt{Type}\} \to \mt{ctxOuter} ::: \{\mt{Unit}\} \to \mt{ctxInner} ::: \{\mt{Unit}\} \\
\hspace{.1in} \to \mt{useOuter} ::: \{\mt{Type}\} \to \mt{useInner} ::: \{\mt{Type}\} \to \mt{bindOuter} ::: \{\mt{Type}\} \to \mt{bindInner} ::: \{\mt{Type}\} \\
- \hspace{.1in} \to \lambda [\mt{attrsGiven} \sim \mt{attrsAbsent}] \; [\mt{useOuter} \sim \mt{useInner}] \; [\mt{bindOuter} \sim \mt{bindInner}] \Rightarrow \$\mt{attrsGiven} \\
+ \hspace{.1in} \to \lambda [\mt{attrsGiven} \sim \mt{attrsAbsent}] \; [\mt{useOuter} \sim \mt{useInner}] \; [\mt{bindOuter} \sim \mt{bindInner}] \\
+ \hspace{.1in} \Rightarrow \mt{option} \; \mt{css\_class} \\
+ \hspace{.1in} \to \$\mt{attrsGiven} \\
\hspace{.1in} \to \mt{tag} \; (\mt{attrsGiven} \rc \mt{attrsAbsent}) \; \mt{ctxOuter} \; \mt{ctxInner} \; \mt{useOuter} \; \mt{bindOuter} \\
\hspace{.1in} \to \mt{xml} \; \mt{ctxInner} \; \mt{useInner} \; \mt{bindInner} \to \mt{xml} \; \mt{ctxOuter} \; (\mt{useOuter} \rc \mt{useInner}) \; (\mt{bindOuter} \rc \mt{bindInner})
\end{array}$$
+Note that any tag may be assigned a CSS class. This is the sole way of making use of the values produced by $\mt{style}$ declarations. Ur/Web itself doesn't deal with the syntax or semantics of style sheets; they can be linked via URLs with \texttt{link} tags. However, Ur/Web does make it easy to calculate upper bounds on usage of CSS classes through program analysis.
Two XML fragments may be concatenated.
$$\begin{array}{l}
@@ -1668,6 +1671,15 @@ $$\begin{array}{l}
\mt{val} \; \mt{sleep} : \mt{int} \to \mt{transaction} \; \mt{unit}
\end{array}$$
+A few functions are available to register callbacks for particular error events. Respectively, they are triggered on calls to $\mt{error}$, uncaught JavaScript exceptions, failure of remote procedure calls, the severance of the connection serving asynchronous messages, or the occurrence of some other error with that connection. If no handlers are registered for a kind of error, then occurrences of that error are ignored silently.
+$$\begin{array}{l}
+ \mt{val} \; \mt{onError} : (\mt{xbody} \to \mt{transaction} \; \mt{unit}) \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{onFail} : (\mt{string} \to \mt{transaction} \; \mt{unit}) \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{onConnectFail} : \mt{transaction} \; \mt{unit} \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{onDisconnect} : \mt{transaction} \; \mt{unit} \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{onServerError} : (\mt{string} \to \mt{transaction} \; \mt{unit}) \to \mt{transaction} \; \mt{unit}
+\end{array}$$
+
\subsubsection{Functional-Reactive Page Generation}
Most approaches to ``AJAX''-style coding involve imperative manipulation of the DOM tree representing an HTML document's structure. Ur/Web follows the \emph{functional-reactive} approach instead. Programs may allocate mutable \emph{sources} of arbitrary types, and an HTML page is effectively a pure function over the latest values of the sources. The page is not mutated directly, but rather it changes automatically as the sources are mutated.
--
cgit v1.2.3
From 331a548a72381bb798611ac8cd5e8397699bf17d Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 14:36:16 -0400
Subject: Constraint syntax
---
doc/manual.tex | 15 +++++++++++++++
1 file changed, 15 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 9acbc1c5..b822b7a2 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1739,6 +1739,21 @@ Ur/Web features some syntactic shorthands for building values using the function
\subsubsection{\label{tables}Table Declarations}
+$\mt{table}$ declarations may include constraints, via these grammar rules.
+$$\begin{array}{rrcll}
+ \textrm{Declarations} & d &::=& \mt{table} \; x : c \; [pk[,]] \; cts \\
+ \textrm{Primary key constraints} & pk &::=& \mt{PRIMARY} \; \mt{KEY} \; K \\
+ \textrm{Keys} & K &::=& f \mid (f, (f,)^+) \\
+  \textrm{Constraint sets} & cts &::=& \mt{CONSTRAINT} \; f \; ct \mid cts, cts \mid \{\{e\}\} \\
+ \textrm{Constraints} & ct &::=& \mt{UNIQUE} \; K \mid \mt{CHECK} \; E \\
+ &&& \mid \mt{FOREIGN} \; \mt{KEY} \; K \; \mt{REFERENCES} \; F \; (K) \; [\mt{ON} \; \mt{DELETE} \; pr] \; [\mt{ON} \; \mt{UPDATE} \; pr] \\
+ \textrm{Foreign tables} & F &::=& x \mid \{\{e\}\} \\
+ \textrm{Propagation modes} & pr &::=& \mt{NO} \; \mt{ACTION} \mid \mt{RESTRICT} \mid \mt{CASCADE} \mid \mt{SET} \; \mt{NULL}
+\end{array}$$
+
+A signature item $\mt{table} \; \mt{x} : \mt{c}$ is actually elaborated into two signature items: $\mt{con} \; \mt{x\_hidden\_constraints} :: \{\{\mt{Unit}\}\}$ and $\mt{val} \; \mt{x} : \mt{sql\_table} \; \mt{c} \; \mt{x\_hidden\_constraints}$. This is appropriate for common cases where client code doesn't care which keys a table has. It's also possible to include constraints after a $\mt{table}$ signature item, with the same syntax as for $\mt{table}$ declarations. This may look like dependent typing, but it's just a convenience. The constraints are type-checked to determine a constructor $u$ to include in $\mt{val} \; \mt{x} : \mt{sql\_table} \; \mt{c} \; (u \rc \mt{x\_hidden\_constraints})$, and then the expressions are thrown away. Nonetheless, it can be useful for documentation purposes to include table constraint details in signatures. Note that the automatic generation of $\mt{x\_hidden\_constraints}$ leads to a kind of free subtyping with respect to which constraints are defined.
+
+
\subsubsection{Queries}
Queries $Q$ are added to the rules for expressions $e$.
--
cgit v1.2.3
From bb11d221d430a04da3214229cd6f1792f70b6cb8 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 5 May 2009 14:45:21 -0400
Subject: Describe GET/POST
---
doc/manual.tex | 6 ++++++
1 file changed, 6 insertions(+)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index b822b7a2..52403a7e 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1846,6 +1846,12 @@ For both links and actions, direct arguments and local variables mentioned impli
Ur/Web programs generally mix server- and client-side code in a fairly transparent way. The one important restriction is that mixed client-server code must encapsulate all server-side pieces within named functions. This is because execution of such pieces will be implemented by explicit calls to the remote web server, and it is useful to get the programmer's help in designing the interface to be used. For example, this makes it easier to allow a client running an old version of an application to continue interacting with a server that has been upgraded to a new version, if the programmer took care to keep the interfaces of all of the old remote calls the same. The functions implementing these services are assigned names in the same way as normal web entry points, by using module structure.
+\medskip
+
+The HTTP standard suggests that GET requests only be used in ways that generate no side effects. Side-effecting operations should use POST requests instead. The Ur/Web compiler enforces this rule strictly, via a simple conservative program analysis. Any page that may have a side effect must be accessed through a form, all of which use POST requests. A page is judged to have a side effect if its code depends syntactically on any of the side-effecting, server-side FFI functions. Links, forms, and most client-side event handlers are not followed during this syntactic traversal, but \texttt{<body>} handlers \emph{are} examined, since they run right away and could just as well be considered parts of main page handlers.
+
+Ur/Web includes a kind of automatic protection against cross site request forgery attacks. Whenever any page execution can have side effects and can also read at least one cookie value, all cookie values must be signed cryptographically, to ensure that the user has come to the current page by submitting a form on a real page generated by the proper server. Signing and signature checking are inserted automatically by the compiler. This prevents attacks like phishing schemes where users are directed to counterfeit pages with forms that submit to your application, where a user's cookies might be submitted without his knowledge, causing some undesired side effect.
+
\section{Compiler Phases}
--
cgit v1.2.3
From c701f11b2ee105af75dbeb4baaf0f2c35bb417e2 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 23 Jun 2009 12:53:47 -0400
Subject: New release
---
CHANGELOG | 13 +++++++++++++
LICENSE | 2 +-
doc/manual.tex | 21 ++++++++++++++++-----
3 files changed, 30 insertions(+), 6 deletions(-)
(limited to 'doc')
diff --git a/CHANGELOG b/CHANGELOG
index bf072690..b9018446 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,16 @@
+========
+20090623
+========
+
+- Many bug fixes
+- Mutually-recursive datatypes
+- SML-style pattern-matching syntax for "fun", "fn", and local "val"
+- Backwards-incompatible change to syntax of formal constructor parameters to
+ value-level functions, to support the previous change
+- Path map support inspired by SML/NJ CM and MLton ML Basis
+- Start of some new standard library modules
+- Some improvements to JavaScript runtime, including better error handling
+
========
20090505
========
diff --git a/LICENSE b/LICENSE
index 0c963687..0c3fa118 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2008, Adam Chlipala
+Copyright (c) 2008-2009, Adam Chlipala
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/doc/manual.tex b/doc/manual.tex
index 52403a7e..0964133e 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -140,6 +140,7 @@ Here is the complete list of directive forms. ``FFI'' stands for ``foreign func
\item \texttt{jsFunc Module.ident=name} gives the JavaScript name of an FFI value.
\item \texttt{library FILENAME} parses \texttt{FILENAME.urp} and merges its contents with the rest of the current file's contents.
\item \texttt{link FILENAME} adds \texttt{FILENAME} to the list of files to be passed to the GCC linker at the end of compilation. This is most useful for importing extra libraries needed by new FFI modules.
+\item \texttt{path NAME=VALUE} creates a mapping from \texttt{NAME} to \texttt{VALUE}. This mapping may be used at the beginnings of filesystem paths given to various other configuration directives. A path like \texttt{\$NAME/rest} is expanded to \texttt{VALUE/rest}. There is an initial mapping from the empty name (for paths like \texttt{\$/list}) to the directory where the Ur/Web standard library is installed. If you accept the default \texttt{configure} options, this directory is \texttt{/usr/local/lib/urweb/ur}.
\item \texttt{prefix PREFIX} sets the prefix included before every URI within the generated application. The default is \texttt{/}.
\item \texttt{profile} generates an executable that may be used with gprof.
\item \texttt{rewrite KIND FROM TO} gives a rule for rewriting canonical module paths. For instance, the canonical path of a page may be \texttt{Mod1.Mod2.mypage}, while you would rather the page were accessed via a URL containing only \texttt{page}. The directive \texttt{rewrite url Mod1/Mod2/mypage page} would accomplish that. The possible values of \texttt{KIND} determine which kinds of objects are affected. The kind \texttt{all} matches any object, and \texttt{url} matches page URLs. The kinds \texttt{table}, \texttt{sequence}, and \texttt{view} match those sorts of SQL entities, and \texttt{relation} matches any of those three. \texttt{cookie} matches HTTP cookies, and \texttt{style} matches CSS class names. If \texttt{FROM} ends in \texttt{/*}, it is interpreted as a prefix matching rule, and rewriting occurs by replacing only the appropriate prefix of a path with \texttt{TO}. While the actual external names of relations and styles have parts separated by underscores instead of slashes, all rewrite rules must be written in terms of slashes.
@@ -288,6 +289,7 @@ $$\begin{array}{rrcll}
&&& \hat{X} \; p & \textrm{unary constructor} \\
&&& \{(x = p,)^*\} & \textrm{rigid record pattern} \\
&&& \{(x = p,)^+, \ldots\} & \textrm{flexible record pattern} \\
+ &&& p : \tau & \textrm{type annotation} \\
&&& (p) & \textrm{explicit precedence} \\
\\
\textrm{Qualified capitalized variables} & \hat{X} &::=& X & \textrm{not from a module} \\
@@ -304,7 +306,7 @@ $$\begin{array}{rrcll}
&&& e \; e & \textrm{function application} \\
&&& \lambda x : \tau \Rightarrow e & \textrm{function abstraction} \\
&&& e [c] & \textrm{polymorphic function application} \\
- &&& \lambda x \; ? \; \kappa \Rightarrow e & \textrm{polymorphic function abstraction} \\
+ &&& \lambda [x \; ? \; \kappa] \Rightarrow e & \textrm{polymorphic function abstraction} \\
&&& e [\kappa] & \textrm{kind-polymorphic function application} \\
&&& X \Longrightarrow e & \textrm{kind-polymorphic function abstraction} \\
\\
@@ -372,7 +374,7 @@ The notation $[c_1, \ldots, c_n]$ is shorthand for $[c_1 = (), \ldots, c_n = ()]
A tuple type $(\tau_1, \ldots, \tau_n)$ expands to a record type $\{1 = \tau_1, \ldots, n = \tau_n\}$, with natural numbers as field names. A tuple pattern $(p_1, \ldots, p_n)$ expands to a rigid record pattern $\{1 = p_1, \ldots, n = p_n\}$. Positive natural numbers may be used in most places where field names would be allowed.
-In general, several adjacent $\lambda$ forms may be combined into one, and kind and type annotations may be omitted, in which case they are implicitly included as wildcards. More formally, for constructor-level abstractions, we can define a new non-terminal $b ::= x \mid (x :: \kappa) \mid X$ and allow composite abstractions of the form $\lambda b^+ \Rightarrow c$, elaborating into the obvious sequence of one core $\lambda$ per element of $b^+$.
+In general, several adjacent $\lambda$ forms may be combined into one, and kind and type annotations may be omitted, in which case they are implicitly included as wildcards. More formally, for constructor-level abstractions, we can define a new non-terminal $b ::= x \mid (x :: \kappa) \mid X$ and allow composite abstractions of the form $\lambda b^+ \Rightarrow c$, elaborating into the obvious sequence of one core $\lambda$ per element of $b^+$.
For any signature item or declaration that defines some entity to be equal to $A$ with classification annotation $B$ (e.g., $\mt{val} \; x : B = A$), $B$ and the preceding colon (or similar punctuation) may be omitted, in which case it is filled in as a wildcard.
@@ -382,12 +384,16 @@ A signature item or declaration $\mt{class} \; x = \lambda y \Rightarrow c$ may
Handling of implicit and explicit constructor arguments may be tweaked with some prefixes to variable references. An expression $@x$ is a version of $x$ where all implicit constructor arguments have been made explicit. An expression $@@x$ achieves the same effect, additionally halting automatic resolution of type class instances and automatic proving of disjointness constraints. The default is that any prefix of a variable's type consisting only of implicit polymorphism, type class instances, and disjointness obligations is resolved automatically, with the variable treated as having the type that starts after the last implicit element, with suitable unification variables substituted. The same syntax works for variables projected out of modules and for capitalized variables (datatype constructors).
-At the expression level, an analogue is available of the composite $\lambda$ form for constructors. We define the language of binders as $b ::= x \mid (x : \tau) \mid (x \; ? \; \kappa) \mid X \mid [c \sim c]$. A lone variable $x$ as a binder stands for an expression variable of unspecified type.
+At the expression level, an analogue is available of the composite $\lambda$ form for constructors. We define the language of binders as $b ::= p \mid [x] \mid [x \; ? \; \kappa] \mid X \mid [c \sim c]$. A lone variable $[x]$ stands for an implicit constructor variable of unspecified kind. The standard value-level function binder is recovered as the type-annotated pattern form $x : \tau$. It is a compile-time error to include a pattern $p$ that does not match every value of the appropriate type.
-A $\mt{val}$ or $\mt{val} \; \mt{rec}$ declaration may include expression binders before the equal sign, following the binder grammar from the last paragraph. Such declarations are elaborated into versions that add additional $\lambda$s to the fronts of the righthand sides, as appropriate. The keyword $\mt{fun}$ is a synonym for $\mt{val} \; \mt{rec}$.
+A local $\mt{val}$ declaration may bind a pattern instead of just a plain variable. As for function arguments, only irrefutable patterns are legal.
+
+The keyword $\mt{fun}$ is a shorthand for $\mt{val} \; \mt{rec}$ that allows arguments to be specified before the equal sign in the definition of each mutually-recursive function, as in SML. Each curried argument must follow the grammar of the $b$ non-terminal introduced two paragraphs ago. A $\mt{fun}$ declaration is elaborated into a version that adds additional $\lambda$s to the fronts of the righthand sides, as appropriate.
A signature item $\mt{functor} \; X_1 \; (X_2 : S_1) : S_2$ is elaborated into $\mt{structure} \; X_1 : \mt{functor}(X_2 : S_1) : S_2$. A declaration $\mt{functor} \; X_1 \; (X_2 : S_1) : S_2 = M$ is elaborated into $\mt{structure} \; X_1 : \mt{functor}(X_2 : S_1) : S_2 = \mt{functor}(X_2 : S_1) : S_2 = M$.
+An $\mt{open} \; \mt{constraints}$ declaration is implicitly inserted for the argument of every functor at the beginning of the functor body. For every declaration of the form $\mt{structure} \; X : S = \mt{struct} \ldots \mt{end}$, an $\mt{open} \; \mt{constraints} \; X$ declaration is implicitly inserted immediately afterward.
+
A declaration $\mt{table} \; x : \{(c = c,)^*\}$ is elaborated into $\mt{table} \; x : [(c = c,)^*]$
The syntax $\mt{where} \; \mt{type}$ is an alternate form of $\mt{where} \; \mt{con}$.
@@ -644,7 +650,7 @@ $$\infer{\Gamma \vdash e [c] : [x \mapsto c]\tau}{
\Gamma \vdash e : x :: \kappa \to \tau
& \Gamma \vdash c :: \kappa
}
-\quad \infer{\Gamma \vdash \lambda x \; ? \; \kappa \Rightarrow e : x \; ? \; \kappa \to \tau}{
+\quad \infer{\Gamma \vdash \lambda [x \; ? \; \kappa] \Rightarrow e : x \; ? \; \kappa \to \tau}{
\Gamma, x :: \kappa \vdash e : \tau
}$$
@@ -732,6 +738,11 @@ $$\infer{\Gamma \vdash \{\overline{x = p}\} \leadsto \Gamma_n; \{\overline{x = \
& \forall i: \Gamma_i \vdash p_i \leadsto \Gamma_{i+1}; \tau_i
}$$
+$$\infer{\Gamma \vdash p : \tau \leadsto \Gamma'; \tau}{
+ \Gamma \vdash p \leadsto \Gamma'; \tau'
+ & \Gamma \vdash \tau' \equiv \tau
+}$$
+
\subsection{Declaration Typing}
We use an auxiliary judgment $\overline{y}; x; \Gamma \vdash \overline{dc} \leadsto \Gamma'$, expressing the enrichment of $\Gamma$ with the types of the datatype constructors $\overline{dc}$, when they are known to belong to datatype $x$ with type parameters $\overline{y}$.
--
cgit v1.2.3
From 3602f46fee1c01d173177298abd3caa58e3d946b Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 23 Jun 2009 14:05:12 -0400
Subject: Factor out common request functionality, in preparation for
supporting different protocols
---
Makefile.in | 9 +-
doc/manual.tex | 2 +-
include/request.h | 22 +++
include/types.h | 2 +
src/c/driver.c | 481 +++---------------------------------------------------
src/c/request.c | 467 ++++++++++++++++++++++++++++++++++++++++++++++++++++
src/compiler.sml | 4 +-
7 files changed, 520 insertions(+), 467 deletions(-)
create mode 100644 include/request.h
create mode 100644 src/c/request.c
(limited to 'doc')
diff --git a/Makefile.in b/Makefile.in
index 4f50ec81..9347e96f 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -13,7 +13,7 @@ all: smlnj mlton c
smlnj: src/urweb.cm
mlton: bin/urweb
-c: lib/c/urweb.o lib/c/driver.o
+c: lib/c/urweb.o lib/c/request.o lib/c/driver.o
clean:
rm -f src/*.mlton.grm.* src/*.mlton.lex.* \
@@ -21,11 +21,8 @@ clean:
lib/c/*.o
rm -rf .cm src/.cm
-lib/c/urweb.o: src/c/urweb.c include/*.h
- gcc -O3 -I include -c src/c/urweb.c -o lib/c/urweb.o $(CFLAGS)
-
-lib/c/driver.o: src/c/driver.c include/*.h
- gcc -O3 -I include -c src/c/driver.c -o lib/c/driver.o $(CFLAGS)
+lib/c/%.o: src/c/%.c include/*.h
+ gcc -O3 -I include -c $< -o $@ $(CFLAGS)
src/urweb.cm: src/prefix.cm src/sources
cat src/prefix.cm src/sources \
diff --git a/doc/manual.tex b/doc/manual.tex
index 0964133e..22c219ad 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -394,7 +394,7 @@ A signature item $\mt{functor} \; X_1 \; (X_2 : S_1) : S_2$ is elaborated into $
An $\mt{open} \; \mt{constraints}$ declaration is implicitly inserted for the argument of every functor at the beginning of the functor body. For every declaration of the form $\mt{structure} \; X : S = \mt{struct} \ldots \mt{end}$, an $\mt{open} \; \mt{constraints} \; X$ declaration is implicitly inserted immediately afterward.
-A declaration $\mt{table} \; x : \{(c = c,)^*\}$ is elaborated into $\mt{table} \; x : [(c = c,)^*]$
+A declaration $\mt{table} \; x : \{(c = c,)^*\}$ is elaborated into $\mt{table} \; x : [(c = c,)^*]$.
The syntax $\mt{where} \; \mt{type}$ is an alternate form of $\mt{where} \; \mt{con}$.
diff --git a/include/request.h b/include/request.h
new file mode 100644
index 00000000..1111f47f
--- /dev/null
+++ b/include/request.h
@@ -0,0 +1,22 @@
+#ifndef REQUEST_H
+#define REQUEST_H
+
+#include <sys/types.h>
+
+#include "types.h"
+
+typedef struct uw_rc *uw_request_context;
+
+void uw_request_init(void);
+void uw_sign(const char *in, char *out);
+
+uw_request_context uw_new_request_context(void);
+void uw_free_request_context(uw_request_context);
+
+request_result uw_request(uw_request_context, uw_context, char *request, size_t request_len, int sock);
+
+uw_context uw_request_new_context(void);
+
+void *client_pruner(void *data);
+
+#endif
diff --git a/include/types.h b/include/types.h
index ca9ef152..4a28452b 100644
--- a/include/types.h
+++ b/include/types.h
@@ -40,6 +40,8 @@ typedef struct uw_Basis_file {
typedef enum { SUCCESS, FATAL, BOUNDED_RETRY, UNLIMITED_RETRY, RETURN_BLOB } failure_kind;
+typedef enum { SERVED, KEEP_OPEN, FAILED } request_result;
+
typedef struct input *uw_input;
#define INTS_MAX 50
diff --git a/src/c/driver.c b/src/c/driver.c
index 2140cb2c..56eba9a6 100644
--- a/src/c/driver.c
+++ b/src/c/driver.c
@@ -14,9 +14,9 @@
#include <mhash.h>
#include "urweb.h"
+#include "request.h"
int uw_backlog = 10;
-int uw_bufsize = 1024;
typedef struct node {
int fd;
@@ -54,108 +54,16 @@ static int dequeue() {
static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
-#define MAX_RETRIES 5
-
-static int try_rollback(uw_context ctx) {
- int r = uw_rollback(ctx);
-
- if (r) {
- printf("Error running SQL ROLLBACK\n");
- uw_reset(ctx);
- uw_write(ctx, "HTTP/1.1 500 Internal Server Error\n\r");
- uw_write(ctx, "Content-type: text/plain\r\n\r\n");
- uw_write(ctx, "Error running SQL ROLLBACK\n");
- }
-
- return r;
-}
-
-static uw_context new_context() {
- uw_context ctx = uw_init();
- int retries_left = MAX_RETRIES;
-
- while (1) {
- failure_kind fk = uw_begin_init(ctx);
-
- if (fk == SUCCESS) {
- printf("Database connection initialized.\n");
- break;
- } else if (fk == BOUNDED_RETRY) {
- if (retries_left) {
- printf("Initialization error triggers bounded retry: %s\n", uw_error_message(ctx));
- --retries_left;
- } else {
- printf("Fatal initialization error (out of retries): %s\n", uw_error_message(ctx));
- uw_free(ctx);
- return NULL;
- }
- } else if (fk == UNLIMITED_RETRY)
- printf("Initialization error triggers unlimited retry: %s\n", uw_error_message(ctx));
- else if (fk == FATAL) {
- printf("Fatal initialization error: %s\n", uw_error_message(ctx));
- uw_free(ctx);
- return NULL;
- } else {
- printf("Unknown uw_begin_init return code!\n");
- uw_free(ctx);
- return NULL;
- }
- }
-
- return ctx;
-}
-
-#define KEYSIZE 16
-#define PASSSIZE 4
-
-#define HASH_ALGORITHM MHASH_SHA256
-#define HASH_BLOCKSIZE 32
-#define KEYGEN_ALGORITHM KEYGEN_MCRYPT
-
-int uw_hash_blocksize = HASH_BLOCKSIZE;
-
-static int password[PASSSIZE];
-static unsigned char private_key[KEYSIZE];
-
-static void init_crypto() {
- KEYGEN kg = {{HASH_ALGORITHM, HASH_ALGORITHM}};
- int i;
-
- assert(mhash_get_block_size(HASH_ALGORITHM) == HASH_BLOCKSIZE);
-
- for (i = 0; i < PASSSIZE; ++i)
- password[i] = rand();
-
- if (mhash_keygen_ext(KEYGEN_ALGORITHM, kg,
- private_key, sizeof(private_key),
- (unsigned char*)password, sizeof(password)) < 0) {
- printf("Key generation failed\n");
- exit(1);
- }
-}
-
-void uw_sign(const char *in, char *out) {
- MHASH td;
-
- td = mhash_hmac_init(HASH_ALGORITHM, private_key, sizeof(private_key),
- mhash_get_hash_pblock(HASH_ALGORITHM));
-
- mhash(td, in, strlen(in));
- if (mhash_hmac_deinit(td, out) < 0)
- printf("Signing failed");
-}
-
static void *worker(void *data) {
- int me = *(int *)data, retries_left = MAX_RETRIES;
- uw_context ctx = new_context();
+ int me = *(int *)data;
+ uw_context ctx = uw_request_new_context();
size_t buf_size = 2;
char *buf = malloc(buf_size);
- size_t path_copy_size = 0;
- char *path_copy = malloc(path_copy_size);
+ uw_request_context rc = uw_new_request_context();
while (1) {
- char *back = buf, *s, *post;
- int sock, dont_close = 0;
+ char *back = buf;
+ int sock;
pthread_mutex_lock(&queue_mutex);
while (empty())
@@ -166,8 +74,8 @@ static void *worker(void *data) {
printf("Handling connection with thread #%d.\n", me);
while (1) {
- unsigned retries_left = MAX_RETRIES;
int r;
+ char *s1, *s2;
if (back - buf == buf_size - 1) {
char *new_buf;
@@ -189,358 +97,40 @@ static void *worker(void *data) {
break;
}
- //printf("Received %d bytes.\n", r);
-
back += r;
*back = 0;
- if (s = strstr(buf, "\r\n\r\n")) {
- failure_kind fk;
- int is_post = 0, do_normal_send = 1;
- char *boundary = NULL;
- size_t boundary_len;
- char *cmd, *path, *headers, *inputs, *after_headers;
-
- //printf("All: %s\n", buf);
-
- s[2] = 0;
- after_headers = s + 4;
-
- if (!(s = strstr(buf, "\r\n"))) {
- fprintf(stderr, "No newline in buf\n");
- break;
- }
-
- *s = 0;
- headers = s + 2;
- cmd = s = buf;
-
- //printf("Read: %s\n", buf);
-
- if (!strsep(&s, " ")) {
- fprintf(stderr, "No first space in HTTP command\n");
- break;
- }
-
- uw_set_headers(ctx, headers);
-
- if (!strcmp(cmd, "POST")) {
- char *clen_s = uw_Basis_requestHeader(ctx, "Content-length");
- if (!clen_s) {
- fprintf(stderr, "No Content-length with POST\n");
- goto done;
- }
- int clen = atoi(clen_s);
- if (clen < 0) {
- fprintf(stderr, "Negative Content-length with POST\n");
- goto done;
- }
-
- while (back - after_headers < clen) {
- if (back - buf == buf_size - 1) {
- char *new_buf;
- buf_size *= 2;
- new_buf = realloc(buf, buf_size);
-
- back = new_buf + (back - buf);
- headers = new_buf + (headers - buf);
- uw_headers_moved(ctx, headers);
- after_headers = new_buf + (after_headers - buf);
- s = new_buf + (s - buf);
-
- buf = new_buf;
- }
-
- r = recv(sock, back, buf_size - 1 - (back - buf), 0);
-
- if (r < 0) {
- fprintf(stderr, "Recv failed\n");
- goto done;
- }
-
- if (r == 0) {
- printf("Connection closed.\n");
- goto done;
- }
-
- back += r;
- *back = 0;
- }
+ if ((s1 = strstr(buf, "\r\n\r\n"))) {
+ request_result rr;
- is_post = 1;
+ if ((s2 = strcasestr(buf, "\r\nContent-Length: ")) && s2 < s1) {
+ int clen;
- clen_s = uw_Basis_requestHeader(ctx, "Content-type");
- if (clen_s && !strncasecmp(clen_s, "multipart/form-data", 19)) {
- if (strncasecmp(clen_s + 19, "; boundary=", 11)) {
- fprintf(stderr, "Bad multipart boundary spec");
- break;
- }
-
- boundary = clen_s + 28;
- boundary[0] = '-';
- boundary[1] = '-';
- boundary_len = strlen(boundary);
- }
- } else if (strcmp(cmd, "GET")) {
- fprintf(stderr, "Not ready for non-GET/POST command: %s\n", cmd);
- break;
- }
-
- path = s;
- if (!strsep(&s, " ")) {
- fprintf(stderr, "No second space in HTTP command\n");
- break;
- }
-
- if (!strcmp(path, "/.msgs")) {
- char *id = uw_Basis_requestHeader(ctx, "UrWeb-Client");
- char *pass = uw_Basis_requestHeader(ctx, "UrWeb-Pass");
-
- if (id && pass) {
- unsigned idn = atoi(id);
- uw_client_connect(idn, atoi(pass), sock);
- dont_close = 1;
- fprintf(stderr, "Processed request for messages by client %u\n\n", idn);
- }
- else {
- fprintf(stderr, "Missing fields in .msgs request: %s, %s\n\n", id, pass);
- }
- break;
- }
-
- if (boundary) {
- char *part = after_headers, *after_sub_headers, *header, *after_header;
- size_t part_len;
-
- part = strstr(part, boundary);
- if (!part) {
- fprintf(stderr, "Missing first multipart boundary\n");
+ if (sscanf(s2 + 18, "%d\r\n", &clen) != 1) {
+ fprintf(stderr, "Malformed Content-Length header\n");
break;
}
- part += boundary_len;
-
- while (1) {
- char *name = NULL, *filename = NULL, *type = NULL;
-
- if (part[0] == '-' && part[1] == '-')
- break;
-
- if (*part != '\r') {
- fprintf(stderr, "No \\r after multipart boundary\n");
- goto done;
- }
- ++part;
- if (*part != '\n') {
- fprintf(stderr, "No \\n after multipart boundary\n");
- goto done;
- }
- ++part;
-
- if (!(after_sub_headers = strstr(part, "\r\n\r\n"))) {
- fprintf(stderr, "Missing end of headers after multipart boundary\n");
- goto done;
- }
- after_sub_headers[2] = 0;
- after_sub_headers += 4;
-
- for (header = part; after_header = strstr(header, "\r\n"); header = after_header + 2) {
- char *colon, *after_colon;
-
- *after_header = 0;
- if (!(colon = strchr(header, ':'))) {
- fprintf(stderr, "Missing colon in multipart sub-header\n");
- goto done;
- }
- *colon++ = 0;
- if (*colon++ != ' ') {
- fprintf(stderr, "No space after colon in multipart sub-header\n");
- goto done;
- }
-
- if (!strcasecmp(header, "Content-Disposition")) {
- if (strncmp(colon, "form-data; ", 11)) {
- fprintf(stderr, "Multipart data is not \"form-data\"\n");
- goto done;
- }
-
- for (colon += 11; after_colon = strchr(colon, '='); colon = after_colon) {
- char *data;
- after_colon[0] = 0;
- if (after_colon[1] != '"') {
- fprintf(stderr, "Disposition setting is missing initial quote\n");
- goto done;
- }
- data = after_colon+2;
- if (!(after_colon = strchr(data, '"'))) {
- fprintf(stderr, "Disposition setting is missing final quote\n");
- goto done;
- }
- after_colon[0] = 0;
- ++after_colon;
- if (after_colon[0] == ';' && after_colon[1] == ' ')
- after_colon += 2;
-
- if (!strcasecmp(colon, "name"))
- name = data;
- else if (!strcasecmp(colon, "filename"))
- filename = data;
- }
- } else if (!strcasecmp(header, "Content-Type")) {
- type = colon;
- }
- }
-
- part = memmem(after_sub_headers, back - after_sub_headers, boundary, boundary_len);
- if (!part) {
- fprintf(stderr, "Missing boundary after multipart payload\n");
- goto done;
- }
- part[-2] = 0;
- part_len = part - after_sub_headers - 2;
- part[0] = 0;
- part += boundary_len;
-
- if (filename) {
- uw_Basis_file f = {filename, type, {part_len, after_sub_headers}};
-
- if (uw_set_file_input(ctx, name, f)) {
- puts(uw_error_message(ctx));
- goto done;
- }
- } else if (uw_set_input(ctx, name, after_sub_headers)) {
- puts(uw_error_message(ctx));
- goto done;
- }
- }
- }
- else {
- if (is_post)
- inputs = after_headers;
- else if (inputs = strchr(path, '?'))
- *inputs++ = 0;
-
- if (inputs) {
- char *name, *value;
-
- while (*inputs) {
- name = inputs;
- if (inputs = strchr(inputs, '&'))
- *inputs++ = 0;
- else
- inputs = strchr(name, 0);
-
- if (value = strchr(name, '=')) {
- *value++ = 0;
- if (uw_set_input(ctx, name, value)) {
- puts(uw_error_message(ctx));
- goto done;
- }
- }
- else if (uw_set_input(ctx, name, "")) {
- puts(uw_error_message(ctx));
- goto done;
- }
- }
- }
- }
-
- printf("Serving URI %s....\n", path);
-
- while (1) {
- size_t path_len = strlen(path);
-
- uw_write_header(ctx, "HTTP/1.1 200 OK\r\n");
-
- if (path_len + 1 > path_copy_size) {
- path_copy_size = path_len + 1;
- path_copy = realloc(path_copy, path_copy_size);
- }
- strcpy(path_copy, path);
- fk = uw_begin(ctx, path_copy);
- if (fk == SUCCESS || fk == RETURN_BLOB) {
- uw_commit(ctx);
- break;
- } else if (fk == BOUNDED_RETRY) {
- if (retries_left) {
- printf("Error triggers bounded retry: %s\n", uw_error_message(ctx));
- --retries_left;
- }
- else {
- printf("Fatal error (out of retries): %s\n", uw_error_message(ctx));
-
- try_rollback(ctx);
-
- uw_reset_keep_error_message(ctx);
- uw_write_header(ctx, "HTTP/1.1 500 Internal Server Error\n\r");
- uw_write_header(ctx, "Content-type: text/plain\r\n");
- uw_write(ctx, "Fatal error (out of retries): ");
- uw_write(ctx, uw_error_message(ctx));
- uw_write(ctx, "\n");
-
- break;
- }
- } else if (fk == UNLIMITED_RETRY)
- printf("Error triggers unlimited retry: %s\n", uw_error_message(ctx));
- else if (fk == FATAL) {
- printf("Fatal error: %s\n", uw_error_message(ctx));
-
- try_rollback(ctx);
-
- uw_reset_keep_error_message(ctx);
- uw_write_header(ctx, "HTTP/1.1 500 Internal Server Error\r\n");
- uw_write_header(ctx, "Content-type: text/html\r\n");
- uw_write(ctx, "Fatal Error");
- uw_write(ctx, "Fatal error: ");
- uw_write(ctx, uw_error_message(ctx));
- uw_write(ctx, "\n");
-
- break;
- } else {
- printf("Unknown uw_handle return code!\n");
-
- try_rollback(ctx);
- uw_reset_keep_request(ctx);
- uw_write_header(ctx, "HTTP/1.1 500 Internal Server Error\n\r");
- uw_write_header(ctx, "Content-type: text/plain\r\n");
- uw_write(ctx, "Unknown uw_handle return code!\n");
-
- break;
- }
-
- if (try_rollback(ctx))
- break;
-
- uw_reset_keep_request(ctx);
+ if (s1 + 4 + clen > back)
+ continue;
}
+ rr = uw_request(rc, ctx, buf, back - buf, sock);
uw_send(ctx, sock);
- printf("Done with client.\n\n");
- uw_memstats(ctx);
+ if (rr == SERVED || rr == FAILED)
+ close(sock);
+ else if (rr != KEEP_OPEN)
+ fprintf(stderr, "Illegal uw_request return code: %d\n", rr);
+
break;
}
}
- done:
- if (!dont_close)
- close(sock);
uw_reset(ctx);
}
}
-static void *client_pruner(void *data) {
- uw_context ctx = new_context();
-
- if (!ctx)
- exit(1);
-
- while (1) {
- uw_prune_clients(ctx);
- sleep(5);
- }
-}
-
static void help(char *cmd) {
printf("Usage: %s [-p ] [-t ]\n", cmd);
}
@@ -550,32 +140,6 @@ static void sigint(int signum) {
exit(0);
}
-static void initialize() {
- uw_context ctx;
- failure_kind fk;
-
- init_crypto();
-
- ctx = new_context();
-
- if (!ctx)
- exit(1);
-
- for (fk = uw_initialize(ctx); fk == UNLIMITED_RETRY; fk = uw_initialize(ctx)) {
- printf("Unlimited retry during init: %s\n", uw_error_message(ctx));
- uw_db_rollback(ctx);
- uw_reset(ctx);
- }
-
- if (fk != SUCCESS) {
- printf("Failed to initialize database! %s\n", uw_error_message(ctx));
- uw_db_rollback(ctx);
- exit(1);
- }
-
- uw_free(ctx);
-}
-
int main(int argc, char *argv[]) {
// The skeleton for this function comes from Beej's sockets tutorial.
int sockfd; // listen on sock_fd
@@ -622,8 +186,7 @@ int main(int argc, char *argv[]) {
}
}
- uw_global_init();
- initialize();
+ uw_request_init();
names = calloc(nthreads, sizeof(int));
diff --git a/src/c/request.c b/src/c/request.c
new file mode 100644
index 00000000..b13c6118
--- /dev/null
+++ b/src/c/request.c
@@ -0,0 +1,467 @@
+#define _GNU_SOURCE
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include "urweb.h"
+
+#define MAX_RETRIES 5
+
+static int try_rollback(uw_context ctx) {
+ int r = uw_rollback(ctx);
+
+ if (r) {
+ printf("Error running SQL ROLLBACK\n");
+ uw_reset(ctx);
+ uw_write(ctx, "HTTP/1.1 500 Internal Server Error\n\r");
+ uw_write(ctx, "Content-type: text/plain\r\n\r\n");
+ uw_write(ctx, "Error running SQL ROLLBACK\n");
+ }
+
+ return r;
+}
+
+uw_context uw_request_new_context() {
+ uw_context ctx = uw_init();
+ int retries_left = MAX_RETRIES;
+
+ while (1) {
+ failure_kind fk = uw_begin_init(ctx);
+
+ if (fk == SUCCESS) {
+ printf("Database connection initialized.\n");
+ break;
+ } else if (fk == BOUNDED_RETRY) {
+ if (retries_left) {
+ printf("Initialization error triggers bounded retry: %s\n", uw_error_message(ctx));
+ --retries_left;
+ } else {
+ printf("Fatal initialization error (out of retries): %s\n", uw_error_message(ctx));
+ uw_free(ctx);
+ return NULL;
+ }
+ } else if (fk == UNLIMITED_RETRY)
+ printf("Initialization error triggers unlimited retry: %s\n", uw_error_message(ctx));
+ else if (fk == FATAL) {
+ printf("Fatal initialization error: %s\n", uw_error_message(ctx));
+ uw_free(ctx);
+ return NULL;
+ } else {
+ printf("Unknown uw_begin_init return code!\n");
+ uw_free(ctx);
+ return NULL;
+ }
+ }
+
+ return ctx;
+}
+
+#define KEYSIZE 16
+#define PASSSIZE 4
+
+#define HASH_ALGORITHM MHASH_SHA256
+#define HASH_BLOCKSIZE 32
+#define KEYGEN_ALGORITHM KEYGEN_MCRYPT
+
+int uw_hash_blocksize = HASH_BLOCKSIZE;
+
+static int password[PASSSIZE];
+static unsigned char private_key[KEYSIZE];
+
+static void init_crypto() {
+ KEYGEN kg = {{HASH_ALGORITHM, HASH_ALGORITHM}};
+ int i;
+
+ assert(mhash_get_block_size(HASH_ALGORITHM) == HASH_BLOCKSIZE);
+
+ for (i = 0; i < PASSSIZE; ++i)
+ password[i] = rand();
+
+ if (mhash_keygen_ext(KEYGEN_ALGORITHM, kg,
+ private_key, sizeof(private_key),
+ (unsigned char*)password, sizeof(password)) < 0) {
+ printf("Key generation failed\n");
+ exit(1);
+ }
+}
+
+void uw_request_init() {
+ uw_context ctx;
+ failure_kind fk;
+
+ uw_global_init();
+
+ ctx = uw_request_new_context();
+
+ if (!ctx)
+ exit(1);
+
+ for (fk = uw_initialize(ctx); fk == UNLIMITED_RETRY; fk = uw_initialize(ctx)) {
+ printf("Unlimited retry during init: %s\n", uw_error_message(ctx));
+ uw_db_rollback(ctx);
+ uw_reset(ctx);
+ }
+
+ if (fk != SUCCESS) {
+ printf("Failed to initialize database! %s\n", uw_error_message(ctx));
+ uw_db_rollback(ctx);
+ exit(1);
+ }
+
+ uw_free(ctx);
+
+ init_crypto();
+}
+
+void uw_sign(const char *in, char *out) {
+ MHASH td;
+
+ td = mhash_hmac_init(HASH_ALGORITHM, private_key, sizeof(private_key),
+ mhash_get_hash_pblock(HASH_ALGORITHM));
+
+ mhash(td, in, strlen(in));
+ if (mhash_hmac_deinit(td, out) < 0)
+ printf("Signing failed");
+}
+
+typedef struct uw_rc {
+ size_t path_copy_size;
+ char *path_copy;
+} *uw_request_context;
+
+uw_request_context uw_new_request_context(void) {
+ uw_request_context r = malloc(sizeof(struct uw_rc));
+ r->path_copy_size = 0;
+ r->path_copy = malloc(0);
+ return r;
+}
+
+void uw_free_request_context(uw_request_context r) {
+ free(r->path_copy);
+ free(r);
+}
+
+request_result uw_request(uw_request_context rc, uw_context ctx, char *request, size_t request_len, int sock) {
+ int retries_left = MAX_RETRIES;
+ char *s;
+ failure_kind fk;
+ int is_post = 0, do_normal_send = 1;
+ char *boundary = NULL;
+ size_t boundary_len;
+ char *cmd, *path, *headers, *inputs, *after_headers;
+
+ if (!(s = strstr(request, "\r\n\r\n"))) {
+ fprintf(stderr, "No end of headers found in request\n");
+ return FAILED;
+ }
+
+ s[2] = 0;
+ after_headers = s + 4;
+
+ if (!(s = strstr(request, "\r\n"))) {
+ fprintf(stderr, "No newline in request\n");
+ return FAILED;
+ }
+
+ *s = 0;
+ headers = s + 2;
+ cmd = s = request;
+
+ if (!strsep(&s, " ")) {
+ fprintf(stderr, "No first space in HTTP command\n");
+ return FAILED;
+ }
+
+ uw_set_headers(ctx, headers);
+
+ if (!strcmp(cmd, "POST")) {
+ char *clen_s = uw_Basis_requestHeader(ctx, "Content-length");
+ if (!clen_s) {
+ fprintf(stderr, "No Content-length with POST\n");
+ return FAILED;
+ }
+ int clen = atoi(clen_s);
+ if (clen < 0) {
+ fprintf(stderr, "Negative Content-length with POST\n");
+ return FAILED;
+ }
+
+ if (request + request_len - after_headers < clen) {
+ fprintf(stderr, "Request doesn't contain all POST data (according to Content-Length)\n");
+ return FAILED;
+ }
+
+ is_post = 1;
+
+ clen_s = uw_Basis_requestHeader(ctx, "Content-type");
+ if (clen_s && !strncasecmp(clen_s, "multipart/form-data", 19)) {
+ if (strncasecmp(clen_s + 19, "; boundary=", 11)) {
+ fprintf(stderr, "Bad multipart boundary spec");
+ return FAILED;
+ }
+
+ boundary = clen_s + 28;
+ boundary[0] = '-';
+ boundary[1] = '-';
+ boundary_len = strlen(boundary);
+ }
+ } else if (strcmp(cmd, "GET")) {
+ fprintf(stderr, "Not ready for non-GET/POST command: %s\n", cmd);
+ return FAILED;
+ }
+
+ path = s;
+ if (!strsep(&s, " ")) {
+ fprintf(stderr, "No second space in HTTP command\n");
+ return FAILED;
+ }
+
+ if (!strcmp(path, "/.msgs")) {
+ char *id = uw_Basis_requestHeader(ctx, "UrWeb-Client");
+ char *pass = uw_Basis_requestHeader(ctx, "UrWeb-Pass");
+
+ if (sock < 0) {
+ fprintf(stderr, ".msgs requested, but not socket supplied\n");
+ return FAILED;
+ }
+
+ if (id && pass) {
+ unsigned idn = atoi(id);
+ uw_client_connect(idn, atoi(pass), sock);
+ fprintf(stderr, "Processed request for messages by client %u\n\n", idn);
+ return KEEP_OPEN;
+ }
+ else {
+ fprintf(stderr, "Missing fields in .msgs request: %s, %s\n\n", id, pass);
+ return FAILED;
+ }
+ }
+
+ if (boundary) {
+ char *part = after_headers, *after_sub_headers, *header, *after_header;
+ size_t part_len;
+
+ part = strstr(part, boundary);
+ if (!part) {
+ fprintf(stderr, "Missing first multipart boundary\n");
+ return FAILED;
+ }
+ part += boundary_len;
+
+ while (1) {
+ char *name = NULL, *filename = NULL, *type = NULL;
+
+ if (part[0] == '-' && part[1] == '-')
+ break;
+
+ if (*part != '\r') {
+ fprintf(stderr, "No \\r after multipart boundary\n");
+ return FAILED;
+ }
+ ++part;
+ if (*part != '\n') {
+ fprintf(stderr, "No \\n after multipart boundary\n");
+ return FAILED;
+ }
+ ++part;
+
+ if (!(after_sub_headers = strstr(part, "\r\n\r\n"))) {
+ fprintf(stderr, "Missing end of headers after multipart boundary\n");
+ return FAILED;
+ }
+ after_sub_headers[2] = 0;
+ after_sub_headers += 4;
+
+ for (header = part; after_header = strstr(header, "\r\n"); header = after_header + 2) {
+ char *colon, *after_colon;
+
+ *after_header = 0;
+ if (!(colon = strchr(header, ':'))) {
+ fprintf(stderr, "Missing colon in multipart sub-header\n");
+ return FAILED;
+ }
+ *colon++ = 0;
+ if (*colon++ != ' ') {
+ fprintf(stderr, "No space after colon in multipart sub-header\n");
+ return FAILED;
+ }
+
+ if (!strcasecmp(header, "Content-Disposition")) {
+ if (strncmp(colon, "form-data; ", 11)) {
+ fprintf(stderr, "Multipart data is not \"form-data\"\n");
+ return FAILED;
+ }
+
+ for (colon += 11; after_colon = strchr(colon, '='); colon = after_colon) {
+ char *data;
+ after_colon[0] = 0;
+ if (after_colon[1] != '"') {
+ fprintf(stderr, "Disposition setting is missing initial quote\n");
+ return FAILED;
+ }
+ data = after_colon+2;
+ if (!(after_colon = strchr(data, '"'))) {
+ fprintf(stderr, "Disposition setting is missing final quote\n");
+ return FAILED;
+ }
+ after_colon[0] = 0;
+ ++after_colon;
+ if (after_colon[0] == ';' && after_colon[1] == ' ')
+ after_colon += 2;
+
+ if (!strcasecmp(colon, "name"))
+ name = data;
+ else if (!strcasecmp(colon, "filename"))
+ filename = data;
+ }
+ } else if (!strcasecmp(header, "Content-Type")) {
+ type = colon;
+ }
+ }
+
+ part = memmem(after_sub_headers, request + request_len - after_sub_headers, boundary, boundary_len);
+ if (!part) {
+ fprintf(stderr, "Missing boundary after multipart payload\n");
+ return FAILED;
+ }
+ part[-2] = 0;
+ part_len = part - after_sub_headers - 2;
+ part[0] = 0;
+ part += boundary_len;
+
+ if (filename) {
+ uw_Basis_file f = {filename, type, {part_len, after_sub_headers}};
+
+ if (uw_set_file_input(ctx, name, f)) {
+ fprintf(stderr, "%s\n", uw_error_message(ctx));
+ return FAILED;
+ }
+ } else if (uw_set_input(ctx, name, after_sub_headers)) {
+ fprintf(stderr, "%s\n", uw_error_message(ctx));
+ return FAILED;
+ }
+ }
+ }
+ else {
+ if (is_post)
+ inputs = after_headers;
+ else if (inputs = strchr(path, '?'))
+ *inputs++ = 0;
+
+ if (inputs) {
+ char *name, *value;
+
+ while (*inputs) {
+ name = inputs;
+ if (inputs = strchr(inputs, '&'))
+ *inputs++ = 0;
+ else
+ inputs = strchr(name, 0);
+
+ if (value = strchr(name, '=')) {
+ *value++ = 0;
+ if (uw_set_input(ctx, name, value)) {
+ fprintf(stderr, "%s\n", uw_error_message(ctx));
+ return FAILED;
+ }
+ }
+ else if (uw_set_input(ctx, name, "")) {
+ fprintf(stderr, "%s\n", uw_error_message(ctx));
+ return FAILED;
+ }
+ }
+ }
+ }
+
+ printf("Serving URI %s....\n", path);
+
+ while (1) {
+ size_t path_len = strlen(path);
+
+ uw_write_header(ctx, "HTTP/1.1 200 OK\r\n");
+
+ if (path_len + 1 > rc->path_copy_size) {
+ rc->path_copy_size = path_len + 1;
+ rc->path_copy = realloc(rc->path_copy, rc->path_copy_size);
+ }
+ strcpy(rc->path_copy, path);
+ fk = uw_begin(ctx, rc->path_copy);
+ if (fk == SUCCESS || fk == RETURN_BLOB) {
+ uw_commit(ctx);
+ return SERVED;
+ } else if (fk == BOUNDED_RETRY) {
+ if (retries_left) {
+ printf("Error triggers bounded retry: %s\n", uw_error_message(ctx));
+ --retries_left;
+ }
+ else {
+ printf("Fatal error (out of retries): %s\n", uw_error_message(ctx));
+
+ try_rollback(ctx);
+
+ uw_reset_keep_error_message(ctx);
+ uw_write_header(ctx, "HTTP/1.1 500 Internal Server Error\n\r");
+ uw_write_header(ctx, "Content-type: text/plain\r\n");
+ uw_write(ctx, "Fatal error (out of retries): ");
+ uw_write(ctx, uw_error_message(ctx));
+ uw_write(ctx, "\n");
+
+ return FAILED;
+ }
+ } else if (fk == UNLIMITED_RETRY)
+ printf("Error triggers unlimited retry: %s\n", uw_error_message(ctx));
+ else if (fk == FATAL) {
+ printf("Fatal error: %s\n", uw_error_message(ctx));
+
+ try_rollback(ctx);
+
+ uw_reset_keep_error_message(ctx);
+ uw_write_header(ctx, "HTTP/1.1 500 Internal Server Error\r\n");
+ uw_write_header(ctx, "Content-type: text/html\r\n");
+ uw_write(ctx, "Fatal Error");
+ uw_write(ctx, "Fatal error: ");
+ uw_write(ctx, uw_error_message(ctx));
+ uw_write(ctx, "\n");
+
+ return FAILED;
+ } else {
+ printf("Unknown uw_handle return code!\n");
+
+ try_rollback(ctx);
+
+ uw_reset_keep_request(ctx);
+ uw_write_header(ctx, "HTTP/1.1 500 Internal Server Error\n\r");
+ uw_write_header(ctx, "Content-type: text/plain\r\n");
+ uw_write(ctx, "Unknown uw_handle return code!\n");
+
+ return FAILED;
+ }
+
+ if (try_rollback(ctx))
+ return FAILED;
+
+ uw_reset_keep_request(ctx);
+ }
+}
+
+void *client_pruner(void *data) {
+ uw_context ctx = uw_request_new_context();
+
+ if (!ctx)
+ exit(1);
+
+ while (1) {
+ uw_prune_clients(ctx);
+ sleep(5);
+ }
+}
diff --git a/src/compiler.sml b/src/compiler.sml
index 4209426f..c7c2f65e 100644
--- a/src/compiler.sml
+++ b/src/compiler.sml
@@ -884,11 +884,13 @@ val toSqlify = transform sqlify "sqlify" o toMono_opt2
fun compileC {cname, oname, ename, libs, profile, debug, link = link'} =
let
val urweb_o = clibFile "urweb.o"
+ val request_o = clibFile "request.o"
val driver_o = clibFile "driver.o"
val compile = "gcc " ^ Config.gccArgs ^ " -Wstrict-prototypes -Werror -O3 -I " ^ Config.includ
^ " -c " ^ cname ^ " -o " ^ oname
- val link = "gcc -Werror -O3 -lm -lmhash -pthread " ^ libs ^ " " ^ urweb_o ^ " " ^ oname ^ " " ^ driver_o ^ " -o " ^ ename
+ val link = "gcc -Werror -O3 -lm -lmhash -pthread " ^ libs ^ " " ^ urweb_o ^ " " ^ oname
+ ^ " " ^ request_o ^ " " ^ driver_o ^ " -o " ^ ename
val (compile, link) =
if profile then
--
cgit v1.2.3
From 20b1f5880b6553c42f2a71fd5ad38b865faed6b6 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sun, 12 Jul 2009 13:16:05 -0400
Subject: MySQL query gets up to C linking
---
doc/manual.tex | 6 +-
src/cjr_print.sml | 7 +-
src/mysql.sml | 513 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
src/postgres.sml | 31 +++-
src/settings.sig | 7 +-
src/settings.sml | 10 +-
6 files changed, 547 insertions(+), 27 deletions(-)
(limited to 'doc')
diff --git a/doc/manual.tex b/doc/manual.tex
index 22c219ad..3b57dc1b 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -137,6 +137,7 @@ Here is the complete list of directive forms. ``FFI'' stands for ``foreign func
\item \texttt{effectful Module.ident} registers an FFI function or transaction as having side effects. The optimizer avoids removing, moving, or duplicating calls to such functions. Every effectful FFI function must be registered, or the optimizer may make invalid transformations.
\item \texttt{exe FILENAME} sets the filename to which to write the output executable. The default for file \texttt{P.urp} is \texttt{P.exe}.
\item \texttt{ffi FILENAME} reads the file \texttt{FILENAME.urs} to determine the interface to a new FFI module. The name of the module is calculated from \texttt{FILENAME} in the same way as for normal source files. See the files \texttt{include/urweb.h} and \texttt{src/c/urweb.c} for examples of C headers and implementations for FFI modules. In general, every type or value \texttt{Module.ident} becomes \texttt{uw\_Module\_ident} in C.
+\item \texttt{header FILENAME} adds \texttt{FILENAME} to the list of files to be \texttt{\#include}d in C sources. This is most useful for interfacing with new FFI modules.
\item \texttt{jsFunc Module.ident=name} gives the JavaScript name of an FFI value.
\item \texttt{library FILENAME} parses \texttt{FILENAME.urp} and merges its contents with the rest of the current file's contents.
\item \texttt{link FILENAME} adds \texttt{FILENAME} to the list of files to be passed to the GCC linker at the end of compilation. This is most useful for importing extra libraries needed by new FFI modules.
@@ -193,7 +194,7 @@ We give the Ur language definition in \LaTeX $\;$ math mode, since that is prett
We often write syntax like $e^*$ to indicate zero or more copies of $e$, $e^+$ to indicate one or more copies, and $e,^*$ and $e,^+$ to indicate multiple copies separated by commas. Another separator may be used in place of a comma. The $e$ term may be surrounded by parentheses to indicate grouping; those parentheses should not be included in the actual ASCII.
-We write $\ell$ for literals of the primitive types, for the most part following C conventions. There are $\mt{int}$, $\mt{float}$, and $\mt{string}$ literals.
+We write $\ell$ for literals of the primitive types, for the most part following C conventions. There are $\mt{int}$, $\mt{float}$, $\mt{char}$, and $\mt{string}$ literals. Character literals follow the SML convention instead of the C convention, written like \texttt{\#"a"} instead of \texttt{'a'}.
This version of the manual doesn't include operator precedences; see \texttt{src/urweb.grm} for that.
@@ -610,7 +611,7 @@ $$\infer{\Gamma \vdash \mt{map} \; f \; (c_1 \rc c_2) \equiv \mt{map} \; f \; c_
\subsection{Expression Typing}
-We assume the existence of a function $T$ assigning types to literal constants. It maps integer constants to $\mt{Basis}.\mt{int}$, float constants to $\mt{Basis}.\mt{float}$, and string constants to $\mt{Basis}.\mt{string}$.
+We assume the existence of a function $T$ assigning types to literal constants. It maps integer constants to $\mt{Basis}.\mt{int}$, float constants to $\mt{Basis}.\mt{float}$, character constants to $\mt{Basis}.\mt{char}$, and string constants to $\mt{Basis}.\mt{string}$.
We also refer to a function $\mathcal I$, such that $\mathcal I(\tau)$ ``uses an oracle'' to instantiate all constructor function arguments at the beginning of $\tau$ that are marked implicit; i.e., replace $x_1 ::: \kappa_1 \to \ldots \to x_n ::: \kappa_n \to \tau$ with $[x_1 \mapsto c_1]\ldots[x_n \mapsto c_n]\tau$, where the $c_i$s are inferred and $\tau$ does not start like $x ::: \kappa \to \tau'$.
@@ -1147,6 +1148,7 @@ The idea behind Ur is to serve as the ideal host for embedded domain-specific la
$$\begin{array}{l}
\mt{type} \; \mt{int} \\
\mt{type} \; \mt{float} \\
+ \mt{type} \; \mt{char} \\
\mt{type} \; \mt{string} \\
\mt{type} \; \mt{time} \\
\mt{type} \; \mt{blob} \\
diff --git a/src/cjr_print.sml b/src/cjr_print.sml
index 7d1120b4..fcfa402e 100644
--- a/src/cjr_print.sml
+++ b/src/cjr_print.sml
@@ -1652,7 +1652,7 @@ fun p_exp' par env (e, loc) =
#query (Settings.currentDbms ())
{loc = loc,
- numCols = length outputs,
+ cols = map (fn (_, t) => sql_type_in env t) outputs,
doCols = doCols}]
| SOME (id, query) =>
box [p_list_sepi newline
@@ -1675,7 +1675,7 @@ fun p_exp' par env (e, loc) =
id = id,
query = query,
inputs = map #2 inputs,
- numCols = length outputs,
+ cols = map (fn (_, t) => sql_type_in env t) outputs,
doCols = doCols}],
newline,
@@ -2797,7 +2797,8 @@ fun p_sql env (ds, _) =
box [string "uw_",
string (CharVector.map Char.toLower x),
space,
- p_sqltype env (t, ErrorMsg.dummySpan)]) xts,
+ string (#p_sql_type (Settings.currentDbms ())
+ (sql_type_in env t))]) xts,
case (pk, csts) of
("", []) => box []
| _ => string ",",
diff --git a/src/mysql.sml b/src/mysql.sml
index 7b02c787..2fcdef2d 100644
--- a/src/mysql.sml
+++ b/src/mysql.sml
@@ -31,6 +31,30 @@ open Settings
open Print.PD
open Print
+fun p_sql_type t =
+ case t of
+ Int => "bigint"
+ | Float => "double"
+ | String => "longtext"
+ | Bool => "bool"
+ | Time => "timestamp"
+ | Blob => "longblob"
+ | Channel => "bigint"
+ | Client => "int"
+ | Nullable t => p_sql_type t
+
+fun p_buffer_type t =
+ case t of
+ Int => "MYSQL_TYPE_LONGLONG"
+ | Float => "MYSQL_TYPE_DOUBLE"
+ | String => "MYSQL_TYPE_STRING"
+ | Bool => "MYSQL_TYPE_LONG"
+ | Time => "MYSQL_TYPE_TIME"
+ | Blob => "MYSQL_TYPE_BLOB"
+ | Channel => "MYSQL_TYPE_LONGLONG"
+ | Client => "MYSQL_TYPE_LONG"
+ | Nullable t => p_buffer_type t
+
fun init {dbstring, prepared = ss, tables, views, sequences} =
let
val host = ref NONE
@@ -138,6 +162,10 @@ fun init {dbstring, prepared = ss, tables, views, sequences} =
newline,
uhoh true "Error preparing statement: %s" ["msg"]],
string "}",
+ newline,
+ string "conn->p",
+ string (Int.toString i),
+ string " = stmt;",
newline]
end)
ss,
@@ -253,12 +281,484 @@ fun init {dbstring, prepared = ss, tables, views, sequences} =
newline]
end
-fun query _ = raise Fail "MySQL query"
-fun queryPrepared _ = raise Fail "MySQL queryPrepared"
-fun dml _ = raise Fail "MySQL dml"
-fun dmlPrepared _ = raise Fail "MySQL dmlPrepared"
-fun nextval _ = raise Fail "MySQL nextval"
-fun nextvalPrepared _ = raise Fail "MySQL nextvalPrepared"
+fun p_getcol {wontLeakStrings = _, col = i, typ = t} =
+ let
+ fun getter t =
+ case t of
+ String => box [string "({",
+ newline,
+ string "uw_Basis_string s = uw_malloc(ctx, length",
+ string (Int.toString i),
+ string " + 1);",
+ newline,
+ string "out[",
+ string (Int.toString i),
+ string "].buffer = s;",
+ newline,
+ string "out[",
+ string (Int.toString i),
+ string "].buffer_length = length",
+ string (Int.toString i),
+ string " + 1;",
+ newline,
+ string "mysql_stmt_fetch_column(stmt, &out[",
+ string (Int.toString i),
+ string "], ",
+ string (Int.toString i),
+ string ", 0);",
+ newline,
+ string "s[length",
+ string (Int.toString i),
+ string "] = 0;",
+ newline,
+ string "s;",
+ newline,
+ string "})"]
+ | Blob => box [string "({",
+ newline,
+ string "uw_Basis_blob b = {length",
+ string (Int.toString i),
+ string ", uw_malloc(ctx, length",
+ string (Int.toString i),
+ string ")};",
+ newline,
+ string "out[",
+ string (Int.toString i),
+ string "].buffer = b.data;",
+ newline,
+ string "out[",
+ string (Int.toString i),
+ string "].buffer_length = length",
+ string (Int.toString i),
+ string ";",
+ newline,
+ string "mysql_stmt_fetch_column(stmt, &out[",
+ string (Int.toString i),
+ string "], ",
+ string (Int.toString i),
+ string ", 0);",
+ newline,
+ string "b;",
+ newline,
+ string "})"]
+ | Time => box [string "({",
+ string "MYSQL_TIME *mt = buffer",
+ string (Int.toString i),
+ string ";",
+ newline,
+ newline,
+ string "struct tm t = {mt->second, mt->minute, mt->hour, mt->day, mt->month, mt->year, 0, 0, -1};",
+ newline,
+ string "mktime(&tm);",
+ newline,
+ string "})"]
+ | _ => box [string "buffer",
+ string (Int.toString i)]
+ in
+ case t of
+ Nullable t => box [string "(is_null",
+ string (Int.toString i),
+ string " ? NULL : ",
+ case t of
+ String => getter t
+ | _ => box [string "({",
+ newline,
+ string (p_sql_ctype t),
+ space,
+ string "*tmp = uw_malloc(ctx, sizeof(",
+ string (p_sql_ctype t),
+ string "));",
+ newline,
+ string "*tmp = ",
+ getter t,
+ string ";",
+ newline,
+ string "tmp;",
+ newline,
+ string "})"],
+ string ")"]
+ | _ => box [string "(is_null",
+ string (Int.toString i),
+ string " ? ",
+ box [string "({",
+ string (p_sql_ctype t),
+ space,
+ string "tmp;",
+ newline,
+ string "uw_error(ctx, FATAL, \"Unexpectedly NULL field #",
+ string (Int.toString i),
+ string "\");",
+ newline,
+ string "tmp;",
+ newline,
+ string "})"],
+ string " : ",
+ getter t,
+ string ")"]
+ end
+
+fun queryCommon {loc, query, cols, doCols} =
+ box [string "int n, r;",
+ newline,
+ string "MYSQL_BIND out[",
+ string (Int.toString (length cols)),
+ string "];",
+ newline,
+ p_list_sepi (box []) (fn i => fn t =>
+ let
+ fun buffers t =
+ case t of
+ String => box [string "unsigned long length",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | Blob => box [string "unsigned long length",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | _ => box [string (p_sql_ctype t),
+ space,
+ string "buffer",
+ string (Int.toString i),
+ string ";",
+ newline]
+ in
+ box [string "my_bool is_null",
+ string (Int.toString i),
+ string ";",
+ newline,
+ case t of
+ Nullable t => buffers t
+ | _ => buffers t,
+ newline]
+ end) cols,
+ newline,
+
+ string "memset(out, 0, sizeof out);",
+ newline,
+ p_list_sepi (box []) (fn i => fn t =>
+ let
+ fun buffers t =
+ case t of
+ String => box []
+ | Blob => box []
+ | _ => box [string "out[",
+ string (Int.toString i),
+ string "].buffer = &buffer",
+ string (Int.toString i),
+ string ";",
+ newline]
+ in
+ box [string "out[",
+ string (Int.toString i),
+ string "].buffer_type = ",
+ string (p_buffer_type t),
+ string ";",
+ newline,
+ string "out[",
+ string (Int.toString i),
+ string "].is_null = &is_null",
+ string (Int.toString i),
+ string ";",
+ newline,
+
+ case t of
+ Nullable t => buffers t
+ | _ => buffers t,
+ newline]
+ end) cols,
+ newline,
+
+ string "if (mysql_stmt_execute(stmt)) uw_error(ctx, FATAL, \"",
+ string (ErrorMsg.spanToString loc),
+ string ": Error executing query\");",
+ newline,
+ newline,
+
+ string "if (mysql_stmt_store_result(stmt)) uw_error(ctx, FATAL, \"",
+ string (ErrorMsg.spanToString loc),
+ string ": Error storing query result\");",
+ newline,
+ newline,
+
+ string "if (mysql_stmt_bind_result(stmt, out)) uw_error(ctx, FATAL, \"",
+ string (ErrorMsg.spanToString loc),
+ string ": Error binding query result\");",
+ newline,
+ newline,
+
+ string "uw_end_region(ctx);",
+ newline,
+ string "while ((r = mysql_stmt_fetch(stmt)) == 0) {",
+ newline,
+ doCols p_getcol,
+ string "}",
+ newline,
+ newline,
+
+ string "if (r != MYSQL_NO_DATA) uw_error(ctx, FATAL, \"",
+ string (ErrorMsg.spanToString loc),
+ string ": query result fetching failed\");",
+ newline]
+
+fun query {loc, cols, doCols} =
+ box [string "uw_conn *conn = uw_get_db(ctx);",
+ newline,
+ string "MYSQL_stmt *stmt = mysql_stmt_init(conn->conn);",
+ newline,
+ string "if (stmt == NULL) uw_error(ctx, \"",
+ string (ErrorMsg.spanToString loc),
+ string ": can't allocate temporary prepared statement\");",
+ newline,
+ string "uw_push_cleanup(ctx, (void (*)(void *))mysql_stmt_close, stmt);",
+ newline,
+ string "if (mysql_stmt_prepare(stmt, query, strlen(query))) uw_error(ctx, FATAL, \"",
+ string (ErrorMsg.spanToString loc),
+ string "\");",
+ newline,
+ newline,
+
+ p_list_sepi (box []) (fn i => fn t =>
+ let
+ fun buffers t =
+ case t of
+ String => box []
+ | Blob => box []
+ | _ => box [string "out[",
+ string (Int.toString i),
+ string "].buffer = &buffer",
+ string (Int.toString i),
+ string ";",
+ newline]
+ in
+ box [string "in[",
+ string (Int.toString i),
+ string "].buffer_type = ",
+ string (p_buffer_type t),
+ string ";",
+ newline,
+
+ case t of
+ Nullable t => box [string "in[",
+ string (Int.toString i),
+ string "].is_null = &is_null",
+ string (Int.toString i),
+ string ";",
+ newline,
+ buffers t]
+ | _ => buffers t,
+ newline]
+ end) cols,
+ newline,
+
+ queryCommon {loc = loc, cols = cols, doCols = doCols, query = string "query"},
+
+ string "uw_pop_cleanup(ctx);",
+ newline]
+
+fun p_ensql t e =
+ case t of
+ Int => box [string "uw_Basis_attrifyInt(ctx, ", e, string ")"]
+ | Float => box [string "uw_Basis_attrifyFloat(ctx, ", e, string ")"]
+ | String => e
+ | Bool => box [string "(", e, string " ? \"TRUE\" : \"FALSE\")"]
+ | Time => box [string "uw_Basis_attrifyTime(ctx, ", e, string ")"]
+ | Blob => box [e, string ".data"]
+ | Channel => box [string "uw_Basis_attrifyChannel(ctx, ", e, string ")"]
+ | Client => box [string "uw_Basis_attrifyClient(ctx, ", e, string ")"]
+ | Nullable String => e
+ | Nullable t => box [string "(",
+ e,
+ string " == NULL ? NULL : ",
+ p_ensql t (box [string "(*", e, string ")"]),
+ string ")"]
+
+fun queryPrepared {loc, id, query, inputs, cols, doCols} =
+ box [string "uw_conn *conn = uw_get_db(ctx);",
+ newline,
+ string "MYSQL_BIND in[",
+ string (Int.toString (length inputs)),
+ string "];",
+ newline,
+ p_list_sepi (box []) (fn i => fn t =>
+ let
+ fun buffers t =
+ case t of
+ String => box [string "unsigned long in_length",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | Blob => box [string "unsigned long in_length",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | Time => box [string (p_sql_ctype t),
+ space,
+ string "in_buffer",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | _ => box []
+ in
+ box [case t of
+ Nullable t => box [string "my_bool in_is_null",
+ string (Int.toString i),
+ string ";",
+ newline,
+ buffers t]
+ | _ => buffers t,
+ newline]
+ end) inputs,
+ string "MYSQL_STMT *stmt = conn->p",
+ string (Int.toString id),
+ string ";",
+ newline,
+ newline,
+
+ string "memset(in, 0, sizeof in);",
+ newline,
+ p_list_sepi (box []) (fn i => fn t =>
+ let
+ fun buffers t =
+ case t of
+ String => box [string "in[",
+ string (Int.toString i),
+ string "].buffer = arg",
+ string (Int.toString (i + 1)),
+ string ";",
+ newline,
+ string "in_length",
+ string (Int.toString i),
+ string "= in[",
+ string (Int.toString i),
+ string "].buffer_length = strlen(arg",
+ string (Int.toString (i + 1)),
+ string ");",
+ newline,
+ string "in[",
+ string (Int.toString i),
+ string "].length = &in_length",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | Blob => box [string "in[",
+ string (Int.toString i),
+ string "].buffer = arg",
+ string (Int.toString (i + 1)),
+ string ".data;",
+ newline,
+ string "in_length",
+ string (Int.toString i),
+ string "= in[",
+ string (Int.toString i),
+ string "].buffer_length = arg",
+ string (Int.toString (i + 1)),
+ string ".size;",
+ newline,
+ string "in[",
+ string (Int.toString i),
+ string "].length = &in_length",
+ string (Int.toString i),
+ string ";",
+ newline]
+ | Time =>
+ let
+ fun oneField dst src =
+ box [string "in_buffer",
+ string (Int.toString i),
+ string ".",
+ string dst,
+ string " = tms.tm_",
+ string src,
+ string ";",
+ newline]
+ in
+ box [string "({",
+ newline,
+ string "struct tm tms;",
+ newline,
+ string "if (localtime_r(&arg",
+ string (Int.toString (i + 1)),
+ string ", &tm) == NULL) uw_error(\"",
+ string (ErrorMsg.spanToString loc),
+ string ": error converting to MySQL time\");",
+ newline,
+ oneField "year" "year",
+ oneField "month" "mon",
+ oneField "day" "mday",
+ oneField "hour" "hour",
+ oneField "minute" "min",
+ oneField "second" "sec",
+ newline,
+ string "in[",
+ string (Int.toString i),
+ string "].buffer = &in_buffer",
+ string (Int.toString i),
+ string ";",
+ newline]
+ end
+
+ | _ => box [string "in[",
+ string (Int.toString i),
+ string "].buffer = &arg",
+ string (Int.toString (i + 1)),
+ string ";",
+ newline]
+ in
+ box [string "in[",
+ string (Int.toString i),
+ string "].buffer_type = ",
+ string (p_buffer_type t),
+ string ";",
+ newline,
+
+ case t of
+ Nullable t => box [string "in[",
+ string (Int.toString i),
+ string "].is_null = &in_is_null",
+ string (Int.toString i),
+ string ";",
+ newline,
+ string "if (arg",
+ string (Int.toString (i + 1)),
+ string " == NULL) {",
+ newline,
+ box [string "in_is_null",
+ string (Int.toString i),
+ string " = 1;",
+ newline],
+ string "} else {",
+ box [case t of
+ String => box []
+ | _ =>
+ box [string (p_sql_ctype t),
+ space,
+ string "arg",
+ string (Int.toString (i + 1)),
+ string " = *arg",
+ string (Int.toString (i + 1)),
+ string ";",
+ newline],
+ string "in_is_null",
+ string (Int.toString i),
+ string " = 0;",
+ newline,
+ buffers t,
+ newline]]
+
+ | _ => buffers t,
+ newline]
+ end) inputs,
+ newline,
+
+ queryCommon {loc = loc, cols = cols, doCols = doCols, query = box [string "\"",
+ string (String.toString query),
+ string "\""]}]
+
+fun dml _ = box []
+fun dmlPrepared _ = box []
+fun nextval _ = box []
+fun nextvalPrepared _ = box []
val () = addDbms {name = "mysql",
header = "mysql/mysql.h",
@@ -276,6 +776,7 @@ val () = addDbms {name = "mysql",
string "}",
newline],
init = init,
+ p_sql_type = p_sql_type,
query = query,
queryPrepared = queryPrepared,
dml = dml,
diff --git a/src/postgres.sml b/src/postgres.sml
index 07a68607..ca71798f 100644
--- a/src/postgres.sml
+++ b/src/postgres.sml
@@ -34,6 +34,18 @@ open Print
val ident = String.translate (fn #"'" => "PRIME"
| ch => str ch)
+fun p_sql_type t =
+ case t of
+ Int => "int8"
+ | Float => "float8"
+ | String => "text"
+ | Bool => "bool"
+ | Time => "timestamp"
+ | Blob => "bytea"
+ | Channel => "int8"
+ | Client => "int4"
+ | Nullable t => p_sql_type t
+
fun p_sql_type_base t =
case t of
Int => "bigint"
@@ -540,7 +552,7 @@ fun p_getcol {wontLeakStrings, col = i, typ = t} =
getter t
end
-fun queryCommon {loc, query, numCols, doCols} =
+fun queryCommon {loc, query, cols, doCols} =
box [string "int n, i;",
newline,
newline,
@@ -564,7 +576,7 @@ fun queryCommon {loc, query, numCols, doCols} =
newline,
string "if (PQnfields(res) != ",
- string (Int.toString numCols),
+ string (Int.toString (length cols)),
string ") {",
newline,
box [string "int nf = PQnfields(res);",
@@ -574,7 +586,7 @@ fun queryCommon {loc, query, numCols, doCols} =
string "uw_error(ctx, FATAL, \"",
string (ErrorMsg.spanToString loc),
string ": Query returned %d columns instead of ",
- string (Int.toString numCols),
+ string (Int.toString (length cols)),
string ":\\n%s\\n%s\", nf, ",
query,
string ", PQerrorMessage(conn));",
@@ -598,13 +610,13 @@ fun queryCommon {loc, query, numCols, doCols} =
string "uw_pop_cleanup(ctx);",
newline]
-fun query {loc, numCols, doCols} =
+fun query {loc, cols, doCols} =
box [string "PGconn *conn = uw_get_db(ctx);",
newline,
string "PGresult *res = PQexecParams(conn, query, 0, NULL, NULL, NULL, NULL, 0);",
newline,
newline,
- queryCommon {loc = loc, numCols = numCols, doCols = doCols, query = string "query"}]
+ queryCommon {loc = loc, cols = cols, doCols = doCols, query = string "query"}]
fun p_ensql t e =
case t of
@@ -623,7 +635,7 @@ fun p_ensql t e =
p_ensql t (box [string "(*", e, string ")"]),
string ")"]
-fun queryPrepared {loc, id, query, inputs, numCols, doCols} =
+fun queryPrepared {loc, id, query, inputs, cols, doCols} =
box [string "PGconn *conn = uw_get_db(ctx);",
newline,
string "const int paramFormats[] = { ",
@@ -662,9 +674,9 @@ fun queryPrepared {loc, id, query, inputs, numCols, doCols} =
string ", NULL, paramValues, paramLengths, paramFormats, 0);"],
newline,
newline,
- queryCommon {loc = loc, numCols = numCols, doCols = doCols, query = box [string "\"",
- string (String.toString query),
- string "\""]}]
+ queryCommon {loc = loc, cols = cols, doCols = doCols, query = box [string "\"",
+ string (String.toString query),
+ string "\""]}]
fun dmlCommon {loc, dml} =
box [string "if (res == NULL) uw_error(ctx, FATAL, \"Out of memory allocating DML result.\");",
@@ -821,6 +833,7 @@ val () = addDbms {name = "postgres",
link = "-lpq",
global_init = box [string "void uw_client_init() { }",
newline],
+ p_sql_type = p_sql_type,
init = init,
query = query,
queryPrepared = queryPrepared,
diff --git a/src/settings.sig b/src/settings.sig
index 5406d1de..14e6338d 100644
--- a/src/settings.sig
+++ b/src/settings.sig
@@ -112,7 +112,7 @@ signature SETTINGS = sig
| Client
| Nullable of sql_type
- val p_sql_type : sql_type -> string
+ val p_sql_ctype : sql_type -> string
val isBlob : sql_type -> bool
val isNotNull : sql_type -> bool
@@ -125,18 +125,19 @@ signature SETTINGS = sig
(* Pass these linker arguments *)
global_init : Print.PD.pp_desc,
(* Define uw_client_init() *)
+ p_sql_type : sql_type -> string,
init : {dbstring : string,
prepared : (string * int) list,
tables : (string * (string * sql_type) list) list,
views : (string * (string * sql_type) list) list,
sequences : string list} -> Print.PD.pp_desc,
(* Define uw_db_init(), uw_db_close(), uw_db_begin(), uw_db_commit(), and uw_db_rollback() *)
- query : {loc : ErrorMsg.span, numCols : int,
+ query : {loc : ErrorMsg.span, cols : sql_type list,
doCols : ({wontLeakStrings : bool, col : int, typ : sql_type} -> Print.PD.pp_desc)
-> Print.PD.pp_desc}
-> Print.PD.pp_desc,
queryPrepared : {loc : ErrorMsg.span, id : int, query : string,
- inputs : sql_type list, numCols : int,
+ inputs : sql_type list, cols : sql_type list,
doCols : ({wontLeakStrings : bool, col : int, typ : sql_type} -> Print.PD.pp_desc)
-> Print.PD.pp_desc}
-> Print.PD.pp_desc,
diff --git a/src/settings.sml b/src/settings.sml
index a242768f..f2c2461d 100644
--- a/src/settings.sml
+++ b/src/settings.sml
@@ -285,7 +285,7 @@ datatype sql_type =
| Client
| Nullable of sql_type
-fun p_sql_type t =
+fun p_sql_ctype t =
let
open Print.PD
open Print
@@ -300,7 +300,7 @@ fun p_sql_type t =
| Channel => "uw_Basis_channel"
| Client => "uw_Basis_client"
| Nullable String => "uw_Basis_string"
- | Nullable t => p_sql_type t ^ "*"
+ | Nullable t => p_sql_ctype t ^ "*"
end
fun isBlob Blob = true
@@ -315,17 +315,18 @@ type dbms = {
header : string,
link : string,
global_init : Print.PD.pp_desc,
+ p_sql_type : sql_type -> string,
init : {dbstring : string,
prepared : (string * int) list,
tables : (string * (string * sql_type) list) list,
views : (string * (string * sql_type) list) list,
sequences : string list} -> Print.PD.pp_desc,
- query : {loc : ErrorMsg.span, numCols : int,
+ query : {loc : ErrorMsg.span, cols : sql_type list,
doCols : ({wontLeakStrings : bool, col : int, typ : sql_type} -> Print.PD.pp_desc)
-> Print.PD.pp_desc}
-> Print.PD.pp_desc,
queryPrepared : {loc : ErrorMsg.span, id : int, query : string,
- inputs : sql_type list, numCols : int,
+ inputs : sql_type list, cols : sql_type list,
doCols : ({wontLeakStrings : bool, col : int, typ : sql_type} -> Print.PD.pp_desc)
-> Print.PD.pp_desc}
-> Print.PD.pp_desc,
@@ -341,6 +342,7 @@ val curDb = ref ({name = "",
header = "",
link = "",
global_init = Print.box [],
+ p_sql_type = fn _ => "",
init = fn _ => Print.box [],
query = fn _ => Print.box [],
queryPrepared = fn _ => Print.box [],
--
cgit v1.2.3
From e89607929e56988040d1b31e62362c16a9b75147 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 18 Jul 2009 13:46:22 -0400
Subject: New command-line options; describe simple SQLite build in demo intro
---
demo/prose | 10 ++++++-
doc/manual.tex | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 88 insertions(+), 6 deletions(-)
(limited to 'doc')
diff --git a/demo/prose b/demo/prose
index 37b49a90..df0ed0a9 100644
--- a/demo/prose
+++ b/demo/prose
@@ -1,6 +1,6 @@
Ur/Web is a domain-specific language for programming web applications backed by SQL databases. It is (strongly) statically-typed (like ML and Haskell) and purely functional (like Haskell). Ur is the base language, and the web-specific features of Ur/Web (mostly) come only in the form of special rules for parsing and optimization. The Ur core looks a lot like Standard ML, with a few Haskell-isms added, and kinder, gentler versions of many features from dependently-typed languages like the logic behind Coq. The type system is much more expressive than in ML and Haskell, such that well-typed web applications cannot "go wrong," not just in handling single HTTP requests, but across their entire lifetimes of interacting with HTTP clients. Beyond that, Ur is unusual in using ideas from dependent typing to enable very effective metaprogramming, or programming with explicit analysis of type structure. Many common web application components can be built by Ur/Web functions that operate on types, where it seems impossible to achieve similar code re-use in more established statically-typed languages.
-This demo is built automatically from Ur/Web sources and supporting files. If you unpack the Ur/Web source distribution, then the following steps will build you a local version of this demo:
+
This demo is built automatically from Ur/Web sources and supporting files. If you unpack the Ur/Web source distribution, then the following steps will (if you're lucky) build you a local version of this demo. If you're not lucky, you can consult the beginning of the manual for more detailed instructions.
./configure
make
@@ -24,6 +24,14 @@ ProxyPassReverse /Demo/ http://localhost:8080/Demo/
Building the demo also generates a demo.sql file, giving the SQL commands to run to define all of the tables and sequences that the applications expect to see. The file demo.urp contains a database line with the PostgreSQL database that the demo web server will try to connect to.
+The easiest way to get a demo running locally is probably with this alternate command sequence:
+
+
urweb -dbms sqlite -db /path/to/database/file -demo /Demo demo
+sqlite3 /path/to/database/file <demo/demo.sql
+demo/demo.exe
+
+Then you can skip the static content and connect directly to the demo server at http://localhost:8080/Demo/Demo/main, which contains links to the individual demos.
+
The rest of the demo focuses on the individual applications. Follow the links in the lefthand frame to visit the applications, commentary, and syntax-highlighted source code. (An Emacs mode is behind the syntax highlighting.) I recommend visiting the applications in the order listed, since that is the order in which new concepts are introduced.
hello.urp
diff --git a/doc/manual.tex b/doc/manual.tex
index 3b57dc1b..c28d9610 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -56,24 +56,27 @@ make
sudo make install
\end{verbatim}
-Some other packages must be installed for the above to work. At a minimum, you need a standard UNIX shell, with standard UNIX tools like sed and GCC in your execution path; MLton, the whole-program optimizing compiler for Standard ML; and the mhash C library. To build programs that access SQL databases, you also need libpq, the PostgreSQL client library. As of this writing, in the ``testing'' version of Debian Linux, this command will install the more uncommon of these dependencies:
+Some other packages must be installed for the above to work. At a minimum, you need a standard UNIX shell, with standard UNIX tools like sed and GCC in your execution path; MLton, the whole-program optimizing compiler for Standard ML; and the mhash C library. As of this writing, in the ``testing'' version of Debian Linux, this command will install the more uncommon of these dependencies:
+\begin{verbatim}
+apt-get install mlton libmhash-dev
+\end{verbatim}
+To build programs that access SQL databases, you also need one of these client libraries for supported backends.
\begin{verbatim}
-apt-get install mlton libmhash-dev libpq-dev
+apt-get install libpq-dev libmysqlclient15-dev libsqlite3-dev
\end{verbatim}
It is also possible to access the modules of the Ur/Web compiler interactively, within Standard ML of New Jersey. To install the prerequisites in Debian testing:
-
\begin{verbatim}
apt-get install smlnj libsmlnj-smlnj ml-yacc ml-lpt
\end{verbatim}
To begin an interactive session with the Ur compiler modules, run \texttt{make smlnj}, and then, from within an \texttt{sml} session, run \texttt{CM.make "src/urweb.cm";}. The \texttt{Compiler} module is the main entry point.
-To run an SQL-backed application, you will probably want to install the PostgreSQL server. Version 8.3 or higher is required.
+To run an SQL-backed application with a backend besides SQLite, you will probably want to install one of these servers.
\begin{verbatim}
-apt-get install postgresql-8.3
+apt-get install postgresql-8.3 mysql-server-5.0
\end{verbatim}
To use the Emacs mode, you must have a modern Emacs installed. We assume that you already know how to do this, if you're in the business of looking for an Emacs mode. The demo generation facility of the compiler will also call out to Emacs to syntax-highlight code, and that process depends on the \texttt{htmlize} module, which can be installed in Debian testing via:
@@ -165,6 +168,77 @@ To time how long the different compiler phases run, without generating an execut
urweb -timing P
\end{verbatim}
+Some other command-line parameters are accepted:
+\begin{itemize}
+\item \texttt{-db <DBSTRING>}: Set database connection information, using the format expected by Postgres's \texttt{PQconnectdb()}, which is \texttt{name1=value1 ... nameN=valueN}. The same format is also parsed and used to discover connection parameters for MySQL and SQLite. The only significant settings for MySQL are \texttt{host}, \texttt{hostaddr}, \texttt{port}, \texttt{dbname}, \texttt{user}, and \texttt{password}. The only significant setting for SQLite is \texttt{dbname}, which is interpreted as the filesystem path to the database. Additionally, when using SQLite, a database string may be just a file path.
+
+\item \texttt{-dbms [postgres|mysql|sqlite]}: Sets the database backend to use.
+ \begin{itemize}
+ \item \texttt{postgres}: This is PostgreSQL, the default. Among the supported engines, Postgres best matches the design philosophy behind Ur, with a focus on consistent views of data, even in the face of much concurrency. Different database engines have different quirks of SQL syntax. Ur/Web tends to use Postgres idioms where there are choices to be made, though the compiler translates SQL as needed to support other backends.
+
+ A command sequence like this can initialize a Postgres database, using a file \texttt{app.sql} generated by the compiler:
+ \begin{verbatim}
+createdb app
+psql -f app.sql app
+ \end{verbatim}
+
+ \item \texttt{mysql}: This is MySQL, another popular relational database engine that uses persistent server processes. Ur/Web needs transactions to function properly. Many installations of MySQL use non-transactional storage engines by default. Ur/Web generates table definitions that try to use MySQL's InnoDB engine, which supports transactions. You can edit the first line of a generated \texttt{.sql} file to change this behavior, but it really is true that Ur/Web applications will exhibit bizarre behavior if you choose an engine that ignores transaction commands.
+
+ A command sequence like this can initialize a MySQL database:
+ \begin{verbatim}
+echo "CREATE DATABASE app" | mysql
+mysql -D app
+ (( "bin-path" => "/path/to/hello.exe",
+ "socket" => "/tmp/hello",
+ "check-local" => "disable",
+ "docroot" => "/",
+ "max-procs" => "1"
+ ))
+)
+ \end{verbatim}
+ The least obvious requirement is setting \texttt{max-procs} to 1, so that lighttpd doesn't try to multiplex requests across multiple external processes. This is required for message-passing applications, where a single database of client connections is maintained within a multi-threaded server process. Multiple processes may, however, be used safely with applications that don't use message-passing.
+
+ A FastCGI process reads the environment variable \texttt{URWEB\_NUM\_THREADS} to determine how many threads to spawn for handling client requests. The default is 1.
+ \end{itemize}
+
+\item \texttt{-sql FILENAME}: Set where a database set-up SQL script is written.
+\end{itemize}
+
\section{Ur Syntax}
--
cgit v1.2.3
From 19cd9e965929d541e6714f62154f01b9e487a712 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 18 Jul 2009 15:08:21 -0400
Subject: FFI manual section
---
CHANGELOG | 9 +++++++
doc/manual.tex | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
src/cjr_print.sml | 10 ++++----
3 files changed, 90 insertions(+), 5 deletions(-)
(limited to 'doc')
diff --git a/CHANGELOG b/CHANGELOG
index b9018446..5eeecef8 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,12 @@
+========
+20090718
+========
+
+- New application protocols: CGI and FastCGI
+- New database backends: MySQL and SQLite
+- More JavaScript events added to tags in standard library
+- New manual section on using the foreign function interface (FFI)
+
========
20090623
========
diff --git a/doc/manual.tex b/doc/manual.tex
index c28d9610..cb3ce586 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1940,6 +1940,82 @@ The HTTP standard suggests that GET requests only be used in ways that generate
Ur/Web includes a kind of automatic protection against cross site request forgery attacks. Whenever any page execution can have side effects and can also read at least one cookie value, all cookie values must be signed cryptographically, to ensure that the user has come to the current page by submitting a form on a real page generated by the proper server. Signing and signature checking are inserted automatically by the compiler. This prevents attacks like phishing schemes where users are directed to counterfeit pages with forms that submit to your application, where a user's cookies might be submitted without his knowledge, causing some undesired side effect.
+\section{The Foreign Function Interface}
+
+It is possible to call your own C and JavaScript code from Ur/Web applications, via the foreign function interface (FFI). The starting point for a new binding is a \texttt{.urs} signature file that presents your external library as a single Ur/Web module (with no nested modules). Compilation conventions map the types and values that you use into C and/or JavaScript types and values.
+
+It is most convenient to encapsulate an FFI binding with a new \texttt{.urp} file, which applications can include with the \texttt{library} directive in their own \texttt{.urp} files. A number of directives are likely to show up in the library's project file.
+
+\begin{itemize}
+\item \texttt{clientOnly Module.ident} registers a value as being allowed only in client-side code.
+\item \texttt{clientToServer Module.ident} declares a type as OK to marshal between clients and servers. By default, abstract FFI types are not allowed to be marshalled, since your library might be maintaining invariants that the simple serialization code doesn't check.
+\item \texttt{effectful Module.ident} registers a function that can have side effects. It is important to remember to use this directive for each such function, or else the optimizer might change program semantics.
+\item \texttt{ffi FILE.urs} names the file giving your library's signature. You can include multiple such files in a single \texttt{.urp} file, and each file \texttt{mod.urp} defines an FFI module \texttt{Mod}.
+\item \texttt{header FILE} requests inclusion of a C header file.
+\item \texttt{jsFunc Module.ident=name} gives a mapping from an Ur name for a value to a JavaScript name.
+\item \texttt{link FILE} requests that \texttt{FILE} be linked into applications. It should be a C object or library archive file, and you are responsible for generating it with your own build process.
+\item \texttt{script URL} requests inclusion of a JavaScript source file within application HTML.
+\item \texttt{serverOnly Module.ident} registers a value as being allowed only in server-side code.
+\end{itemize}
+
+\subsection{Writing C FFI Code}
+
+A server-side FFI type or value \texttt{Module.ident} must have a corresponding type or value definition \texttt{uw\_Module\_ident} in C code. With the current Ur/Web version, it's not generally possible to work with Ur records or complex datatypes in C code, but most other kinds of types are fair game.
+
+\begin{itemize}
+  \item Primitive types defined in \texttt{Basis} themselves follow the standard FFI interface, so you may refer to them like \texttt{uw\_Basis\_t}. See \texttt{include/types.h} for their definitions.
+ \item Enumeration datatypes, which have only constructors that take no arguments, should be defined using C \texttt{enum}s. The type is named as for any other type identifier, and each constructor \texttt{c} gets an enumeration constant named \texttt{uw\_Module\_c}.
+  \item A datatype \texttt{dt} (such as \texttt{Basis.option}) that has one non-value-carrying constructor \texttt{NC} and one value-carrying constructor \texttt{C} gets special treatment. Where \texttt{T} is the type of \texttt{C}'s argument, and where we represent \texttt{T} as \texttt{t} in C, we represent \texttt{NC} with \texttt{NULL}. The representation of \texttt{C} depends on whether we're sure that we don't need to use \texttt{NULL} to represent \texttt{t} values; this condition holds only for strings and complex datatypes. For such types, \texttt{C v} is represented with the C encoding of \texttt{v}, such that the translation of \texttt{dt} is \texttt{t}. For other types, \texttt{C v} is represented with a pointer to the C encoding of \texttt{v}, such that the translation of \texttt{dt} is \texttt{t*}.
+\end{itemize}
+
+The C FFI version of a Ur function with type \texttt{T1 -> ... -> TN -> R} or \texttt{T1 -> ... -> TN -> transaction R} has a C prototype like \texttt{R uw\_Module\_ident(uw\_context, T1, ..., TN)}. Only functions with types of the second form may have side effects. \texttt{uw\_context} is the type of state that persists across handling a client request. Many functions that operate on contexts are prototyped in \texttt{include/urweb.h}. Most should only be used internally by the compiler. A few are useful in general FFI implementation:
+\begin{itemize}
+ \item \begin{verbatim}
+void uw_error(uw_context, failure_kind, const char *fmt, ...);
+ \end{verbatim}
+ Abort the current request processing, giving a \texttt{printf}-style format string and arguments for generating an error message. The \texttt{failure\_kind} argument can be \texttt{FATAL}, to abort the whole execution; \texttt{BOUNDED\_RETRY}, to try processing the request again from the beginning, but failing if this happens too many times; or \texttt{UNLIMITED\_RETRY}, to repeat processing, with no cap on how many times this can recur.
+
+ \item \begin{verbatim}
+void uw_push_cleanup(uw_context, void (*func)(void *), void *arg);
+void uw_pop_cleanup(uw_context);
+ \end{verbatim}
+ Manipulate a stack of actions that should be taken if any kind of error condition arises. Calling the ``pop'' function both removes an action from the stack and executes it.
+
+ \item \begin{verbatim}
+void *uw_malloc(uw_context, size_t);
+ \end{verbatim}
+ A version of \texttt{malloc()} that allocates memory inside a context's heap, which is managed with region allocation. Thus, there is no \texttt{uw\_free()}, but you need to be careful not to keep ad-hoc C pointers to this area of memory.
+
+ For performance and correctness reasons, it is usually preferable to use \texttt{uw\_malloc()} instead of \texttt{malloc()}. The former manipulates a local heap that can be kept allocated across page requests, while the latter uses global data structures that may face contention during concurrent execution.
+
+ \item \begin{verbatim}
+typedef void (*uw_callback)(void *);
+void uw_register_transactional(uw_context, void *data, uw_callback commit,
+ uw_callback rollback, uw_callback free);
+ \end{verbatim}
+  All side effects in Ur/Web programs need to be compatible with transactions, such that any set of actions can be undone at any time. Thus, you should not perform actions with non-local side effects directly; instead, register handlers to be called when the current transaction is committed or rolled back. The arguments here give an arbitrary piece of data to be passed to callbacks, a function to call on commit, a function to call on rollback, and a function to call afterward in either case to clean up any allocated resources. A rollback handler may be called after the associated commit handler has already been called, if some later part of the commit process fails.
+
+ To accommodate some stubbornly non-transactional real-world actions like sending an e-mail message, Ur/Web allows the \texttt{rollback} parameter to be \texttt{NULL}. When a transaction commits, all \texttt{commit} actions that have non-\texttt{NULL} rollback actions are tried before any \texttt{commit} actions that have \texttt{NULL} rollback actions. Thus, if a single execution uses only one non-transactional action, and if that action never fails partway through its execution while still causing an observable side effect, then Ur/Web can maintain the transactional abstraction.
+\end{itemize}
+
+
+\subsection{Writing JavaScript FFI Code}
+
+JavaScript is dynamically typed, so Ur/Web type definitions imply no JavaScript code. The JavaScript identifier for each FFI function is set with the \texttt{jsFunc} directive. Each identifier can be defined in any JavaScript file that you ask to include with the \texttt{script} directive.
+
+In contrast to C FFI code, JavaScript FFI functions take no extra context argument. Their argument lists are as you would expect from their Ur types. Only functions whose ranges take the form \texttt{transaction T} should have side effects; the JavaScript ``return type'' of such a function is \texttt{T}. Here are the conventions for representing Ur values in JavaScript.
+
+\begin{itemize}
+\item Integers, floats, strings, characters, and booleans are represented in the usual JavaScript way.
+\item Ur functions are represented with JavaScript functions, currying and all. Only named FFI functions are represented with multiple JavaScript arguments.
+\item An Ur record is represented with a JavaScript record, where Ur field name \texttt{N} translates to JavaScript field name \texttt{\_N}. An exception to this rule is that the empty record is encoded as \texttt{null}.
+\item \texttt{option}-like types receive special handling similar to their handling in C. The ``\texttt{None}'' constructor is \texttt{null}, and a use of the ``\texttt{Some}'' constructor on a value \texttt{v} is either \texttt{v}, if the underlying type doesn't need to use \texttt{null}; or \texttt{\{v:v\}} otherwise.
+\item Any other datatypes represent a non-value-carrying constructor \texttt{C} as \texttt{"\_C"} and an application of a constructor \texttt{C} to value \texttt{v} as \texttt{\{n:"\_C", v:v\}}. This rule only applies to datatypes defined in FFI module signatures; the compiler is free to optimize the representations of other, non-\texttt{option}-like datatypes in arbitrary ways.
+\end{itemize}
+
+It is possible to write JavaScript FFI code that interacts with the functional-reactive structure of a document, but this version of the manual doesn't cover the details.
+
+
\section{Compiler Phases}
The Ur/Web compiler is unconventional in that it relies on a kind of \emph{heuristic compilation}. Not all valid programs will compile successfully. Informally, programs fail to compile when they are ``too higher order.'' Compiler phases do their best to eliminate different kinds of higher order-ness, but some programs just won't compile. This is a trade-off for producing very efficient executables. Compiled Ur/Web programs use native C representations and require no garbage collection.
diff --git a/src/cjr_print.sml b/src/cjr_print.sml
index eccd60c2..83b49719 100644
--- a/src/cjr_print.sml
+++ b/src/cjr_print.sml
@@ -85,11 +85,11 @@ fun p_typ' par env (t, loc) =
(case ListUtil.search #3 (!xncs) of
NONE => raise Fail "CjrPrint: TDatatype marked Option has no constructor with an argument"
| SOME t =>
- case #1 t of
- TDatatype _ => p_typ' par env t
- | TFfi ("Basis", "string") => p_typ' par env t
- | _ => box [p_typ' par env t,
- string "*"])
+ if isUnboxable t then
+ p_typ' par env t
+ else
+ box [p_typ' par env t,
+ string "*"])
| TDatatype (Default, n, _) =>
(box [string "struct",
space,
--
cgit v1.2.3
From 87777778aeaee3e1de767499f86bc22789118a69 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 25 Aug 2009 17:33:13 -0400
Subject: New release
---
CHANGELOG | 8 ++++++
Makefile.in | 2 +-
doc/manual.tex | 8 ++++++
include/types.h | 1 +
src/c/memmem.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
src/compiler.sml | 5 ++--
6 files changed, 104 insertions(+), 3 deletions(-)
create mode 100644 src/c/memmem.c
(limited to 'doc')
diff --git a/CHANGELOG b/CHANGELOG
index 5eeecef8..c67676f4 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,11 @@
+========
+20090825
+========
+
+- Many bug fixes
+- Remote procedure calls must be marked with the new 'rpc' function.
+- Some tweaks to enable usage on OSX (suggested by Paul Snively)
+
========
20090718
========
diff --git a/Makefile.in b/Makefile.in
index 1aca9590..0d8b998a 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -14,7 +14,7 @@ all: smlnj mlton c
smlnj: src/urweb.cm
mlton: bin/urweb
-OBJS := urweb request queue http cgi fastcgi
+OBJS := urweb request queue http cgi fastcgi memmem
c: $(OBJS:%=lib/c/%.o)
clean:
diff --git a/doc/manual.tex b/doc/manual.tex
index cb3ce586..f1f9c967 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1795,6 +1795,14 @@ $$\begin{array}{l}
Transactions can be run on the client by including them in attributes like the $\mt{Onclick}$ attribute of $\mt{button}$, and GUI widgets like $\mt{ctextbox}$ have $\mt{Source}$ attributes that can be used to connect them to sources, so that their values can be read by code running because of, e.g., an $\mt{Onclick}$ event.
+\subsubsection{Remote Procedure Calls}
+
+Any function call may be made a client-to-server ``remote procedure call'' if the function being called needs no features that are only available to client code. To make a function call an RPC, pass that function call as the argument to $\mt{Basis.rpc}$:
+
+$$\begin{array}{l}
+ \mt{val} \; \mt{rpc} : \mt{t} ::: \mt{Type} \to \mt{transaction} \; \mt{t} \to \mt{transaction} \; \mt{t}
+\end{array}$$
+
\subsubsection{Asynchronous Message-Passing}
To support asynchronous, ``server push'' delivery of messages to clients, any client that might need to receive an asynchronous message is assigned a unique ID. These IDs may be retrieved both on the client and on the server, during execution of code related to a client.
diff --git a/include/types.h b/include/types.h
index 062888af..19eae5ad 100644
--- a/include/types.h
+++ b/include/types.h
@@ -2,6 +2,7 @@
#define URWEB_TYPES_H
#include
+#include
typedef long long uw_Basis_int;
typedef double uw_Basis_float;
diff --git a/src/c/memmem.c b/src/c/memmem.c
new file mode 100644
index 00000000..e0687a28
--- /dev/null
+++ b/src/c/memmem.c
@@ -0,0 +1,83 @@
+/* $NetBSD$ */
+
+/*-
+ * Copyright (c) 2003 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#if defined(LIBC_SCCS) && !defined(lint)
+__RCSID("$NetBSD$");
+#endif /* LIBC_SCCS and not lint */
+
+#if !defined(_KERNEL) && !defined(_STANDALONE)
+#include
+#include
+#else
+#include
+#define _DIAGASSERT(x) (void)0
+#define NULL ((char *)0)
+#endif
+
+/*
+ * memmem() returns the location of the first occurence of data
+ * pattern b2 of size len2 in memory block b1 of size len1 or
+ * NULL if none is found.
+ */
+void *
+memmem(const void *b1, const void *b2, size_t len1, size_t len2)
+{
+ /* Initialize search pointer */
+ char *sp = (char *) b1;
+
+ /* Initialize pattern pointer */
+ char *pp = (char *) b2;
+
+ /* Intialize end of search address space pointer */
+ char *eos = sp + len1 - len2;
+
+ /* Sanity check */
+ if(!(b1 && b2 && len1 && len2))
+ return NULL;
+
+ while (sp <= eos) {
+ if (*sp == *pp)
+ if (memcmp(sp, pp, len2) == 0)
+ return sp;
+
+ sp++;
+ }
+
+ return NULL;
+}
diff --git a/src/compiler.sml b/src/compiler.sml
index c99c0eeb..b7550fed 100644
--- a/src/compiler.sml
+++ b/src/compiler.sml
@@ -901,11 +901,12 @@ fun compileC {cname, oname, ename, libs, profile, debug, link = link'} =
let
val proto = Settings.currentProtocol ()
val urweb_o = clibFile "urweb.o"
+ val memmem_o = clibFile "memmem.o"
val compile = "gcc " ^ Config.gccArgs ^ " -Wstrict-prototypes -Werror -O3 -I " ^ Config.includ
^ " -c " ^ cname ^ " -o " ^ oname
- val link = "gcc -Werror -O3 -lm -lmhash -pthread " ^ libs ^ " " ^ urweb_o ^ " " ^ oname
- ^ " " ^ #link proto ^ " -o " ^ ename
+ val link = "gcc -Werror -O3 -lm -lmhash -pthread " ^ Config.gccArgs ^ " " ^ libs ^ " " ^ urweb_o ^ " " ^ oname
+ ^ " " ^ memmem_o ^ " " ^ #link proto ^ " -o " ^ ename
val (compile, link) =
if profile then
--
cgit v1.2.3
From 9964a6181187df8d3ab19a057d9185fc9a88d789 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Sat, 26 Sep 2009 12:57:01 -0400
Subject: New release
---
CHANGELOG | 9 +++++++++
doc/manual.tex | 4 ++--
2 files changed, 11 insertions(+), 2 deletions(-)
(limited to 'doc')
diff --git a/CHANGELOG b/CHANGELOG
index a5a11e6c..286ce5cc 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,12 @@
+========
+20090926
+========
+
+- Reimplemented client-side code generation to use an interpreter, rather than
+ compilation to JavaScript; this avoids common browser flaws: lack of
+ optimization of tail calls and occasional bugs in closure handling.
+- Bug fixes
+
========
20090919
========
diff --git a/doc/manual.tex b/doc/manual.tex
index f1f9c967..3a532fbc 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -2015,10 +2015,10 @@ In contrast to C FFI code, JavaScript FFI functions take no extra context argume
\begin{itemize}
\item Integers, floats, strings, characters, and booleans are represented in the usual JavaScript way.
-\item Ur functions are represented with JavaScript functions, currying and all. Only named FFI functions are represented with multiple JavaScript arguments.
+\item Ur functions are represented in an unspecified way. This means that you should not rely on any details of function representation. Named FFI functions are represented as JavaScript functions with as many arguments as their Ur types specify. To call a non-FFI function \texttt{f} on argument \texttt{x}, run \texttt{execF(f, x)}.
\item An Ur record is represented with a JavaScript record, where Ur field name \texttt{N} translates to JavaScript field name \texttt{\_N}. An exception to this rule is that the empty record is encoded as \texttt{null}.
\item \texttt{option}-like types receive special handling similar to their handling in C. The ``\texttt{None}'' constructor is \texttt{null}, and a use of the ``\texttt{Some}'' constructor on a value \texttt{v} is either \texttt{v}, if the underlying type doesn't need to use \texttt{null}; or \texttt{\{v:v\}} otherwise.
-\item Any other datatypes represent a non-value-carrying constructor \texttt{C} as \texttt{"\_C"} and an application of a constructor \texttt{C} to value \texttt{v} as \texttt{\{n:"\_C", v:v\}}. This rule only applies to datatypes defined in FFI module signatures; the compiler is free to optimize the representations of other, non-\texttt{option}-like datatypes in arbitrary ways.
+\item Any other datatypes represent a non-value-carrying constructor \texttt{C} as \texttt{"C"} and an application of a constructor \texttt{C} to value \texttt{v} as \texttt{\{n:"C", v:v\}}. This rule only applies to datatypes defined in FFI module signatures; the compiler is free to optimize the representations of other, non-\texttt{option}-like datatypes in arbitrary ways.
\end{itemize}
It is possible to write JavaScript FFI code that interacts with the functional-reactive structure of a document, but this version of the manual doesn't cover the details.
--
cgit v1.2.3
From 82ed38468f5da48ce6e9f6ec336cf5b11ca4bb4d Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Tue, 6 Oct 2009 15:59:11 -0400
Subject: Initial versioned1 demo working
---
CHANGELOG | 1 +
doc/manual.tex | 5 +++--
src/c/urweb.c | 1 -
3 files changed, 4 insertions(+), 3 deletions(-)
(limited to 'doc')
diff --git a/CHANGELOG b/CHANGELOG
index d75b807c..0257d617 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -5,6 +5,7 @@ Next
- Bug fixes
- Improvement to choice of line number to cite in record unification error
messages
+- SELECT DISTINCT
========
20090926
diff --git a/doc/manual.tex b/doc/manual.tex
index 3a532fbc..5a46552d 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1469,7 +1469,8 @@ $$\begin{array}{l}
\hspace{.1in} \to \mt{grouped} ::: \{\{\mt{Type}\}\} \\
\hspace{.1in} \to \mt{selectedFields} ::: \{\{\mt{Type}\}\} \\
\hspace{.1in} \to \mt{selectedExps} ::: \{\mt{Type}\} \\
- \hspace{.1in} \to \{\mt{From} : \mt{sql\_from\_items} \; \mt{tables}, \\
+ \hspace{.1in} \to \{\mt{Distinct} : \mt{bool}, \\
+ \hspace{.2in} \mt{From} : \mt{sql\_from\_items} \; \mt{tables}, \\
\hspace{.2in} \mt{Where} : \mt{sql\_exp} \; \mt{tables} \; [] \; [] \; \mt{bool}, \\
\hspace{.2in} \mt{GroupBy} : \mt{sql\_subset} \; \mt{tables} \; \mt{grouped}, \\
\hspace{.2in} \mt{Having} : \mt{sql\_exp} \; \mt{grouped} \; \mt{tables} \; [] \; \mt{bool}, \\
@@ -1855,7 +1856,7 @@ Queries $Q$ are added to the rules for expressions $e$.
$$\begin{array}{rrcll}
\textrm{Queries} & Q &::=& (q \; [\mt{ORDER} \; \mt{BY} \; (E \; [o],)^+] \; [\mt{LIMIT} \; N] \; [\mt{OFFSET} \; N]) \\
- \textrm{Pre-queries} & q &::=& \mt{SELECT} \; P \; \mt{FROM} \; T,^+ \; [\mt{WHERE} \; E] \; [\mt{GROUP} \; \mt{BY} \; p,^+] \; [\mt{HAVING} \; E] \\
+ \textrm{Pre-queries} & q &::=& \mt{SELECT} \; [\mt{DISTINCT}] \; P \; \mt{FROM} \; T,^+ \; [\mt{WHERE} \; E] \; [\mt{GROUP} \; \mt{BY} \; p,^+] \; [\mt{HAVING} \; E] \\
&&& \mid q \; R \; q \\
\textrm{Relational operators} & R &::=& \mt{UNION} \mid \mt{INTERSECT} \mid \mt{EXCEPT}
\end{array}$$
diff --git a/src/c/urweb.c b/src/c/urweb.c
index 88e9569d..e49de568 100644
--- a/src/c/urweb.c
+++ b/src/c/urweb.c
@@ -2160,7 +2160,6 @@ char *uw_Basis_sqlifyTime(uw_context ctx, uw_Basis_time t) {
if (localtime_r(&t, &stm)) {
s = uw_malloc(ctx, TIMES_MAX);
- --stm.tm_hour;
len = strftime(s, TIMES_MAX, TIME_FMT, &stm);
r = uw_malloc(ctx, len + 14);
sprintf(r, "'%s'::timestamp", s);
--
cgit v1.2.3
From c1816939cb921097620b88c213d181d0bdba7f29 Mon Sep 17 00:00:00 2001
From: Adam Chlipala
Date: Thu, 26 Nov 2009 14:20:00 -0500
Subject: More fun with cookies
---
CHANGELOG | 7 +++++++
demo/cookie.ur | 27 +++++++++++++++++++++++++--
doc/manual.tex | 3 ++-
include/urweb.h | 4 +++-
lib/ur/basis.urs | 6 +++++-
src/c/urweb.c | 29 +++++++++++++++++++++++++++--
src/demo.sml | 2 +-
src/monoize.sml | 34 +++++++++++++++++++++++++++++-----
src/settings.sml | 1 +
9 files changed, 100 insertions(+), 13 deletions(-)
(limited to 'doc')
diff --git a/CHANGELOG b/CHANGELOG
index 33f9bc9c..25a9b851 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,10 @@
+========
+Next
+========
+
+- Extended cookie interface (breaks backward compatibility for 'setCookie')
+- Bug fixes
+
========
20091124
========
diff --git a/demo/cookie.ur b/demo/cookie.ur
index ad4e19ec..7e011157 100644
--- a/demo/cookie.ur
+++ b/demo/cookie.ur
@@ -1,15 +1,30 @@
cookie c : {A : string, B : float, C : int}
fun set r =
- setCookie c {A = r.A, B = readError r.B, C = readError r.C};
+ setCookie c {Value = {A = r.A, B = readError r.B, C = readError r.C},
+ Expires = None,
+ Secure = False};
return Cookie set.
+fun setExp r =
+ setCookie c {Value = {A = r.A, B = readError r.B, C = readError r.C},
+ Expires = Some (readError "2012-11-6 00:00:00"),
+ Secure = False};
+ return Cookie set robustly.
+
+fun delete () =
+ clearCookie c;
+ return Cookie cleared.
+
fun main () =
ro <- getCookie c;
return
{case ro of
None => No cookie set.
- | Some v => Cookie: A = {[v.A]}, B = {[v.B]}, C = {[v.C]}}
+ | Some v =>
+ Cookie: A = {[v.A]}, B = {[v.B]}, C = {[v.C]}
+
+ }
+
+
diff --git a/doc/manual.tex b/doc/manual.tex
index 5a46552d..866b9585 100644
--- a/doc/manual.tex
+++ b/doc/manual.tex
@@ -1288,7 +1288,8 @@ $$\begin{array}{l}
\\
\mt{con} \; \mt{http\_cookie} :: \mt{Type} \to \mt{Type} \\
\mt{val} \; \mt{getCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{transaction} \; (\mt{option} \; \mt{t}) \\
- \mt{val} \; \mt{setCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{t} \to \mt{transaction} \; \mt{unit}
+ \mt{val} \; \mt{setCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \{\mt{Value} : \mt{t}, \mt{Expires} : \mt{option} \; \mt{time}, \mt{Secure} : \mt{bool}\} \to \mt{transaction} \; \mt{unit} \\
+ \mt{val} \; \mt{clearCookie} : \mt{t} ::: \mt{Type} \to \mt{http\_cookie} \; \mt{t} \to \mt{transaction} \; \mt{unit}
\end{array}$$
There are also an abstract $\mt{url}$ type and functions for converting to it, based on the policy defined by \texttt{[allow|deny] url} directives in the project file.
diff --git a/include/urweb.h b/include/urweb.h
index 55068966..9884a3ca 100644
--- a/include/urweb.h
+++ b/include/urweb.h
@@ -185,7 +185,8 @@ uw_Basis_string uw_Basis_requestHeader(uw_context, uw_Basis_string);
void uw_write_header(uw_context, uw_Basis_string);
uw_Basis_string uw_Basis_get_cookie(uw_context, uw_Basis_string c);
-uw_unit uw_Basis_set_cookie(uw_context, uw_Basis_string prefix, uw_Basis_string c, uw_Basis_string v);
+uw_unit uw_Basis_set_cookie(uw_context, uw_Basis_string prefix, uw_Basis_string c, uw_Basis_string v, uw_Basis_time *expires, uw_Basis_bool secure);
+uw_unit uw_Basis_clear_cookie(uw_context, uw_Basis_string prefix, uw_Basis_string c);
uw_Basis_channel uw_Basis_new_channel(uw_context, uw_unit);
uw_unit uw_Basis_send(uw_context, uw_Basis_channel, uw_Basis_string);
@@ -210,6 +211,7 @@ uw_Basis_int uw_Basis_blobSize(uw_context, uw_Basis_blob);
__attribute__((noreturn)) void uw_return_blob(uw_context, uw_Basis_blob, uw_Basis_string mimeType);
uw_Basis_time uw_Basis_now(uw_context);
+extern const uw_Basis_time uw_Basis_minTime;
void uw_register_transactional(uw_context, void *data, uw_callback commit, uw_callback rollback, uw_callback free);
diff --git a/lib/ur/basis.urs b/lib/ur/basis.urs
index 47bc3d48..31aa4cdd 100644
--- a/lib/ur/basis.urs
+++ b/lib/ur/basis.urs
@@ -115,6 +115,7 @@ val current : t ::: Type -> signal t -> transaction t
(** * Time *)
val now : transaction time
+val minTime : time
(** HTTP operations *)
@@ -123,7 +124,10 @@ val requestHeader : string -> transaction (option string)
con http_cookie :: Type -> Type
val getCookie : t ::: Type -> http_cookie t -> transaction (option t)
-val setCookie : t ::: Type -> http_cookie t -> t -> transaction unit
+val setCookie : t ::: Type -> http_cookie t -> {Value : t,
+ Expires : option time,
+ Secure : bool} -> transaction unit
+val clearCookie : t ::: Type -> http_cookie t -> transaction unit
(** JavaScript-y gadgets *)
diff --git a/src/c/urweb.c b/src/c/urweb.c
index 344ef2ad..cbe065c3 100644
--- a/src/c/urweb.c
+++ b/src/c/urweb.c
@@ -672,7 +672,7 @@ static input *check_input_space(uw_context ctx, size_t len) {
}
int uw_set_input(uw_context ctx, const char *name, char *value) {
- printf("Input name %s\n", name);
+ //printf("Input name %s\n", name);
if (!strcasecmp(name, ".b")) {
int n = uw_input_num(value);
@@ -2680,18 +2680,41 @@ uw_Basis_string uw_Basis_get_cookie(uw_context ctx, uw_Basis_string c) {
return NULL;
}
-uw_unit uw_Basis_set_cookie(uw_context ctx, uw_Basis_string prefix, uw_Basis_string c, uw_Basis_string v) {
+uw_unit uw_Basis_set_cookie(uw_context ctx, uw_Basis_string prefix, uw_Basis_string c, uw_Basis_string v, uw_Basis_time *expires, uw_Basis_bool secure) {
uw_write_header(ctx, "Set-Cookie: ");
uw_write_header(ctx, c);
uw_write_header(ctx, "=");
uw_write_header(ctx, v);
uw_write_header(ctx, "; path=");
uw_write_header(ctx, prefix);
+ if (expires) {
+ char formatted[30];
+ struct tm tm;
+
+ gmtime_r(expires, &tm);
+
+ strftime(formatted, sizeof formatted, "%a, %d-%b-%Y %T GMT", &tm);
+
+ uw_write_header(ctx, "; expires=");
+ uw_write_header(ctx, formatted);
+ }
+ if (secure)
+ uw_write_header(ctx, "; secure");
uw_write_header(ctx, "\r\n");
return uw_unit_v;
}
+uw_unit uw_Basis_clear_cookie(uw_context ctx, uw_Basis_string prefix, uw_Basis_string c) {
+ uw_write_header(ctx, "Set-Cookie: ");
+ uw_write_header(ctx, c);
+ uw_write_header(ctx, "=; path=");
+ uw_write_header(ctx, prefix);
+ uw_write_header(ctx, "; expires=Mon, 01-01-1970 00:00:00 GMT\r\n");
+
+ return uw_unit_v;
+}
+
static delta *allocate_delta(uw_context ctx, unsigned client) {
unsigned i;
delta *d;
@@ -3077,6 +3100,8 @@ uw_Basis_string uw_Basis_mstrcat(uw_context ctx, ...) {
return r;
}
+const uw_Basis_time minTime = 0;
+
uw_Basis_time uw_Basis_now(uw_context ctx) {
return time(NULL);
}
diff --git a/src/demo.sml b/src/demo.sml
index 4e2caa99..c5480a93 100644
--- a/src/demo.sml
+++ b/src/demo.sml
@@ -430,7 +430,7 @@ fun make {prefix, dirname, guided} =
TextIO.closeOut outf;
- Compiler.compile (OS.Path.base fname)
+ Compiler.compiler (OS.Path.base fname)
end;
TextIO.output (demosOut, "\n