diff --git a/paper-new/defs.tex b/paper-new/defs.tex new file mode 100644 index 0000000000000000000000000000000000000000..9d78917937a2b77a37099f69f6adab10a88934df --- /dev/null +++ b/paper-new/defs.tex @@ -0,0 +1,90 @@ +\newcommand{\To}{\Rightarrow} +\newcommand{\inl}{\mathsf{inl}} +\newcommand{\inr}{\mathsf{inr}} + + +\newcommand{\extlc}{\text{Ext-}\lambda C} +\newcommand{\intlc}{\text{Int-}\lambda C} +\newcommand{\erase}[1]{\lfloor {#1} \rfloor} + + +\newcommand{\uarrowl}{\mathrel{\rotatebox[origin=c]{-30}{$\leftarrowtail$}}} +\newcommand{\uarrowr}{\mathrel{\rotatebox[origin=c]{60}{$\leftarrowtail$}}} +\newcommand{\darrowl}{\mathrel{\rotatebox[origin=c]{30}{$\twoheadleftarrow$}}} +\newcommand{\darrowr}{\mathrel{\rotatebox[origin=c]{120}{$\twoheadleftarrow$}}} +\newcommand{\vuarrow}{\mathrel{\rotatebox[origin=c]{-90}{$\leftarrowtail$}}} +\newcommand{\vdarrow}{\mathrel{\rotatebox[origin=c]{90}{$\twoheadleftarrow$}}} + +% Types, terms, and precision + +\newcommand{\dyn}{?} +\newcommand{\nat}{\text{Nat}} +\newcommand{\bool}{\text{Bool}} + +\newcommand{\err}{\mho} +\newcommand{\zro}{\textsf{zro}} +\newcommand{\suc}{\textsf{suc}} +\newcommand{\lda}[2]{\lambda {#1} . 
{#2}} +\newcommand{\up}[2]{\langle{#2}\uarrowl{#1}\rangle} +\newcommand{\dn}[2]{\langle{#1}\darrowl{#2}\rangle} + +\newcommand{\ltdyn}{\sqsubseteq} +\newcommand{\gtdyn}{\sqsupseteq} +\newcommand{\equidyn}{\mathrel{\gtdyn\ltdyn}} +\newcommand{\gamlt}{\Gamma^\ltdyn} +\newcommand{\hasty}[3]{{#1} \vdash {#2} \colon {#3}} +\newcommand{\etmprec}[4]{{#1} \vdash {#2} \ltdyn_e {#3} \colon {#4}} +\newcommand{\itmprec}[4]{{#1} \vdash {#2} \ltdyn_i {#3} \colon {#4}} +\newcommand{\etmequidyn}[4]{{#1} \vdash {#2} \equidyn_e {#3} \colon {#4}} +\newcommand{\itmequidyn}[4]{{#1} \vdash {#2} \equidyn_i {#3} \colon {#4}} + + + +% SGDT and Intensional Stuff + +\newcommand{\later}{\vartriangleright} +\newcommand{\type}{\texttt{Type}} +\newcommand{\lob}{\text{L\"{o}b}} +\newcommand{\tick}{\mathsf{tick}} +\newcommand{\nxt}{\mathsf{next}} +\newcommand{\fix}{\mathsf{fix}} +\newcommand{\kpa}{\kappa} + +% Model-related stuff +\newcommand{\calC}{\mathcal{C}} +\newcommand{\Set}{\mathsf{Set}} +\newcommand{\Yo}{\mathsf{Yo}} +\newcommand{\Hom}{\mathsf{Hom}} +\newcommand{\calS}{\mathcal{S}} +\newcommand{\gfix}{\texttt{gfix}} +\newcommand{\calU}{\mathcal{U}} +\newcommand{\laterhat}{\widehat{\later}} +\newcommand{\El}{\mathsf{El}} + + +% Predomains and EP pairs +\newcommand{\ty}[1]{\langle {#1} \rangle} +\newcommand{\li}{L_\mho} + +\newcommand{\ltls}{\lesssim} +\newcommand{\bisim}{\approx} + +\newcommand{\injarr}{\textsf{Inj}_\to} +\newcommand{\injnat}{\textsf{Inj}_\mathbb{N}} + +\newcommand{\id}{\mathsf{id}} +\newcommand{\ep}{\leadsto} + +\newcommand{\emb}[2]{\mathsf{emb}_{#1}({#2})} +\newcommand{\proj}[2]{\mathsf{proj}_{#1}({#2})} + +\newcommand{\monto}{\to_m} + + +% Notation for wait functions +\newcommand{\wre}{w_r^e} +\newcommand{\wle}{w_l^e} +\newcommand{\wrp}{w_r^p} +\newcommand{\wlp}{w_l^p} + +\newcommand{\sem}[1]{\llbracket {#1} \rrbracket} \ No newline at end of file diff --git a/paper-new/paper.pdf b/paper-new/paper.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..83ab632d61102a7132a4b7a5f40e7b530ba1117d Binary files /dev/null and b/paper-new/paper.pdf differ diff --git a/paper-new/paper.tex b/paper-new/paper.tex index b41c9727520a74a016b70e6c54b05c3b5e50bc47..2e11bd1c66740cc5c962d3caa8cc8ae11d4912a7 100644 --- a/paper-new/paper.tex +++ b/paper-new/paper.tex @@ -1,6 +1,13 @@ \documentclass[acmsmall,screen]{acmart} \usepackage{mathpartir} \usepackage{tikz-cd} +\usepackage{enumitem} +\usepackage{wrapfig} +\usepackage{fancyvrb} +\usepackage{comment} + +\usepackage{ stmaryrd } + %% Rights management information. This information is sent to you %% when you complete the rights form. These commands have SAMPLE @@ -20,22 +27,8 @@ \acmPrice{15.00} \acmISBN{978-1-4503-XXXX-X/18/06} -\newcommand{\dyn}{?} -\newcommand{\ltdyn}{\sqsubseteq} - -\newcommand{\uarrowl}{\mathrel{\rotatebox[origin=c]{-30}{$\leftarrowtail$}}} -\newcommand{\uarrowr}{\mathrel{\rotatebox[origin=c]{60}{$\leftarrowtail$}}} -\newcommand{\darrowl}{\mathrel{\rotatebox[origin=c]{30}{$\twoheadleftarrow$}}} -\newcommand{\darrowr}{\mathrel{\rotatebox[origin=c]{120}{$\twoheadleftarrow$}}} -\newcommand{\vuarrow}{\mathrel{\rotatebox[origin=c]{-90}{$\leftarrowtail$}}} -\newcommand{\vdarrow}{\mathrel{\rotatebox[origin=c]{90}{$\twoheadleftarrow$}}} -\newcommand{\up}[2]{\langle{#2}\uarrowl{#1}\rangle} -\newcommand{\dn}[2]{\langle{#1}\darrowl{#2}\rangle} +\input{defs} -\newcommand{\later}{\vartriangleright} -\newcommand{\type}{\texttt{Type}} -\newcommand{\lob}{\text{L\"{o}b}} -\newcommand{\tick}{\texttt{tick}} \begin{document} @@ -84,144 +77,747 @@ the statically-typed terms are type checked at compile time, while type checking for the dynamically-typed terms is deferred to runtime. - Gradually-typed languages support the smooth migration from dynamic typing - to static typing, in that the programmer can initially leave off the - typing annotations and provide them later without altering the meaning of the - program. 
Moreover, a key feature of gradually typed languages is that the interaction + Gradually-typed languages should satisfy two intuitive properties. + First, the interaction between the static and dynamic components of the codebase should be safe -- i.e., should preserve the guarantees made by the static types. In other words, in the static portions of the codebase, type soundness must be preserved. + Second, gradual languages should support the smooth migration from dynamic typing + to static typing, in that the programmer can initially leave off the + typing annotations and provide them later without altering the meaning of the + program. + + % Graduality property + Formally speaking, gradually typed languages should satisfy the + \emph{dynamic gradual guarantee}, originally defined by Siek, Vitousek, Cimini, + and Boyland \cite{siek_et_al:LIPIcs:2015:5031}. + This property is also referred to as \emph{graduality}, by analogy with parametricity. + Intuitively, graduality says that going from a dynamic to a static style should not + introduce changes in the meaning of the program. + More specifically, making the types more precise by adding annotations + % (replacing $\dyn$ with a more specific type + %such as integers) + will either result in the same behavior as the less precise program, + or result in a type error. + + \subsection{Current Approaches} + Current approaches to proving graduality include the methods of Abstracting Gradual Typing + \cite{garcia-clark-tanter2016} and the formal tools of the Gradualizer \cite{cimini-siek2016}. + These allow the language developer to start with a statically typed language and derive a + gradually typed language that satisfies the gradual guarantee. The downside is that + not all gradually typed languages can be derived from these frameworks, and moreover, in both + approaches the semantics is derived from the static type system as opposed to the alternative + in which the semantics determines the type checking.
Without a clear semantic interpretation of type + dynamism, it becomes difficult to extend these techniques to new language features such as polymorphism. + % Proving graduality via LR + New and Ahmed have developed a semantic approach to specifying type dynamism in terms of + embedding-projection pairs, which allows for a particularly elegant formulation of the + gradual guarantee. + Moreover, this approach allows for type-based reasoning using $\eta$-equivalences. + + The downside of the above approach is that each new language requires a different logical relation + to prove graduality. As a result, many developments using this approach require vast effort, + with many such papers having 50+ pages of proofs. + Our aim here is that by mechanizing a graduality proof in a reusable way, + we will provide a framework for other language designers to use to ensure that their languages satisfy graduality. + + + One approach currently used to prove graduality uses the technique of \emph{logical relations}. + Specifically, a logical relation is constructed and shown to be sound with respect to + observational approximation. Because the dynamic type is modeled as a non-well-founded + recursive type, the logical relation needs to be parameterized by natural numbers to restore well-foundedness. + This technique is known as a \emph{step-indexed logical relation} \cite{TODO}. + Reasoning about step-indexed logical relations + can be tedious and error-prone, and there are some very subtle aspects that must + be taken into account in the proofs. Figure \ref{TODO} shows an example of a step-indexed logical + relation for the gradually-typed lambda calculus. + + % TODO move to another section?
+ % Difficulties in prior semantics + % - two separate logical relations for expressions + % - transitivity + In particular, the prior approach of New and Ahmed requires two separate logical + relations for terms, one in which the steps of the left-hand term are counted, + and another in which the steps of the right-hand term are counted \cite{TODO}. + Then two terms $M$ and $N$ are related in the ``combined'' logical relation if they are + related in both of the one-sided logical relations. Having two separate logical relations + complicates the statement of the lemmas used to prove graduality, because any statement that + involves a term stepping needs to take into account whether we are counting steps on the left + or the right. Some of the differences can be abstracted over, but difficulties arise for properties %/results + as fundamental and seemingly straightforward as transitivity. + + Specifically, for transitivity, we would like to say that if $M$ is related to $N$ at + index $i$ and $N$ is related to $P$ at index $i$, then $M$ is related to $P$ at $i$. + But this does not actually hold: we require that one of the two pairs of terms + be related ``at infinity'', i.e., that they are related at $i$ for all $i \in \mathbb{N}$. + Which pair is required to satisfy this depends on which logical relation we are considering + (i.e., is it counting steps on the left or on the right), + and so any argument that uses transitivity needs to consider two cases, one + where $M$ and $N$ must be shown to be related for all $i$, and another where $N$ and $P$ must + be related for all $i$. This may not even be possible to show in some scenarios! + + These complications introduced by step-indexing lead one to wonder whether there is a + way of proving graduality without relying on tedious arguments involving natural numbers. + An alternative approach, which we investigate in this paper, is provided by + \emph{synthetic guarded domain theory}, as discussed below.
+ Synthetic guarded domain theory allows the resulting logical relation to look almost + identical to a typical, non-step-indexed logical relation. + + \subsection{Contributions} + Our main contribution is a framework in Guarded Cubical Agda for proving graduality of a cast calculus. + To demonstrate the feasibility and utility of our approach, we have used the framework to prove + graduality for the simply-typed gradual lambda calculus. Along the way, we have developed an intensional theory + of graduality that is of independent interest. + + + \subsection{Proving Graduality in SGDT} + % TODO move to technical background + + % Cast calculi In a gradually-typed language, the mixing of static and dynamic code is seamless, in that the dynamically typed parts are checked at runtime. This type checking occurs at the elimination forms of the language (e.g., pattern matching, field reference, etc.). Gradual languages are generally elaborated to a \emph{cast calculus}, in which the dynamic type checking is made explicit through the insertion of \emph{type casts}. - ... - % Up and down casts - There is a relation $\ltdyn$ on types such that $A \ltdyn B$ means that $A$ is a \emph{more - precise} type than $B$. + In a cast calculus, there is a relation $\ltdyn$ on types such that $A \ltdyn B$ means that + $A$ is a \emph{more precise} type than $B$. There a dynamic type $\dyn$ with the property that $A \ltdyn \dyn$ for all $A$. - + % If $A \ltdyn B$, a term $M$ of type $A$ may be \emph{up}casted to $B$, written $\up A B M$, and a term $N$ of type $B$ may be \emph{down}casted to $A$, written $\dn A B N$. Upcasts always succeed, while downcasts may fail at runtime. + % + % Syntactic term precision + We also have a notion of \emph{syntactic term precision}.
+ If $A \ltdyn B$, and $M$ and $N$ are terms of type $A$ and $B$ respectively, we write + $M \ltdyn N : A \ltdyn B$ to mean that $M$ is more precise than $N$, i.e., $M$ and $N$ + behave the same except that $M$ may error more. + + % Modeling the dynamic type as a recursive sum type? + % Observational equivalence and approximation? + + In this paper, we will be using SGDT techniques to prove graduality for a particularly + simple gradually-typed cast calculus, the gradually-typed lambda calculus. This is just + the usual simply-typed lambda calculus with a dynamic type $\dyn$ such that $A \ltdyn\, \dyn$ for all types $A$, + as well as upcasts and downcasts between any types $A$ and $B$ such that $A \ltdyn B$. + The complete definition will be provided in Section \ref{sec:GTLC}. + + Our main theorem is the following: - % Graduality property - A key property that any gradually-typed language should satisfy is the \emph{dynamic gradual guarantee}, - originally defined by Siek, Vitousek, Cimini, and Boyland \cite{TODO}. - New et al refer to this property as \emph{graduality}, by analogy with parametricity. - Intuitively, graduality says that in going from a dynamic to static style - should not introduce changes in the meaning of the program. - More specifically, making the types more precise (replacing $\dyn$ with a more specific type - such as integers) will either result in the same behavior as the less precise program, - or cause a dynamic type error. - - % Observational equivalence and approximation - % TODO + % \begin{theorem}[Graduality] + % If $M \ltdyn N : \nat$, then either: + % \begin{enumerate} + % \item $M = \mho$ + % \item $M = N = \zro$ + % \item $M = N = \suc n$ for some $n$ + % \item $M$ and $N$ diverge + % \end{enumerate} + % \end{theorem} - \subsection{Prior Approach} - % Proving graduality via LR - The usual approach to proving graduality uses the technique of \emph{logical relations}. 
- Specifially, a logical relation is constructed and showed to be sound with respect to - observational approximation. Because the dynamic type is modeled as a recursive type, - the logical relation needs to be paramterized by natural numbers, a so-called - step-indexed logical relation. Reasoning about step-indexed logical relations - is tedious and error-prone, and there are several very subtle aspects that must - be taken into account in the proofs. - % Difficulties in prior semantics - % TODO + \begin{theorem}[Graduality] + If $\cdot \vdash M \ltdyn N : \nat$, then + \begin{enumerate} + \item If $N = \mho$, then $M = \mho$ + \item If $N = `n$, then $M = \mho$ or $M = `n$ + \item If $M = V$, then $N = V$ + \end{enumerate} + \end{theorem} + + We also should be able to show that $\mho$, $\zro$, and $\suc\, N$ are not equal. + + Our first step toward proving graduality is to formulate an \emph{intensional} gradual lambda calculus, + which we call $\intlc$, in which the computation steps taken by a term are made explicit. + The ``normal'' gradual lambda calculus for which we want to prove graduality will be called the + \emph{extensional} gradual lambda calculus, denoted $\extlc$. We will define an erasure function + $\erase{\cdot} : \intlc \to \extlc$ which takes a program in the intensional lambda calculus + and ``forgets'' the syntactic information about the steps to produce a term in the extensional calculus. + + Every term $M_e$ in $\extlc$ will have a corresponding program $M_i$ in $\intlc$ such that + $\erase{M_i} = M_e$. Moreover, we will show that if $M_e \ltdyn_e M_e'$ in the extensional theory, + then there exist terms $M_i$ and $M_i'$ such that $\erase{M_i}=M_e$, $\erase{M_i'}=M_e'$ and + $M_i \ltdyn_i M_i'$ in the intensional theory. + + We formulate and prove an analogous graduality theorem for the intensional lambda calculus. + We define an interpretation of the intensional lambda calculus into a model in which we prove + various results.
Using the observation above, given $M_e \ltdyn M_e' : \nat$, we can find + intensional programs $M_i$ and $M_i'$ that erase to them and are such that $M_i \ltdyn M_i'$. + We will then apply the intensional graduality theorem to $M_i$ and $M_i'$, and translate the result + back to $M_e$ and $M_e'$. - % - two separate logical relations for expressions - % - transitivity - - % synthetic guarded domain theory, denotational semantics therein - \subsection{Synthetic Guarded Domain Theory} - One way to avoid the tedious reasoning associated with step-indexing is to work - axiomatically inside of a logical system that can reason about non-well-founded recursive - constructions while abstracting away the specific details of step-indexing required - if we were working analytically. - The system that proves useful fot this purpose is called \emph{synthetic guarded - domain theory}, or SGDT for short. We provide a brief overview here, but more - details can be found in \cite{TODO}. + \subsection{Overview of Remainder of Paper} - In SGDT, there is the notion of a \emph{clock}, an abstract type that tracks the - passage of time through \emph{ticks}. Given a clock $k$, a tick $t : \tick k$ serves - as evidence that one unit of time has passed according to the clock $k$. + In Section \ref{sec:technical-background}, we provide technical background on gradually typed languages and + on synthetic guarded domain theory. + % + In Section \ref{sec:GTLC}, we introduce the gradually-typed cast calculus + for which we will prove graduality. Important here are the notions of syntactic + type precision and term precision. We introduce both the extensional gradual lambda calculus + ($\extlc$) and the intensional gradual lambda calculus ($\intlc$). +% + In Section \ref{sec:domain-theory}, we define several fundamental constructions + internal to SGDT that will be needed when we give a denotational semantics to + our intensional lambda calculus. 
+ This includes the notion of Predomains as well as the concept + of EP-Pairs. +% + In Section \ref{sec:semantics}, we define the denotational semantics for the + intensional gradually-typed lambda calculus using the domain theoretic constructions in the + previous section. +% + In Section \ref{sec:graduality}, we outline in more detail the proof of graduality for the + extensional gradual lambda calculus. +% + In Section \ref{sec:discussion}, we discuss the benefits and drawbacks to our approach in comparison + to the traditional step-indexing approach, as well as possibilities for future work. - SGDT also introduces a modality $\later : \type \to \type$, (pronounced ``later''). - Given a clock $k$ and type $T$, the type $\later_k T$ represents a value of type - $T$ one unit of time in the future according to clock $k$. - The type $\later T$ is represented as a function from ticks of a clock $k$ to $T$. - There is an axiom called $\lob$-induction which is stated as follows: - \[ - \lob : \forall T, (\later T \to T) \to T. - \] +\section{Technical Background}\label{sec:technical-background} - That is, to construct a term of type $T$, it suffices to assume that we have access to - such a term ``later'' and use that to help us build a term ``now''. +% synthetic guarded domain theory, denotational semantics therein +\subsection{Synthetic Guarded Domain Theory} +One way to avoid the tedious reasoning associated with step-indexing is to work +axiomatically inside of a logical system that can reason about non-well-founded recursive +constructions while abstracting away the specific details of step-indexing required +if we were working analytically. +The system that proves useful for this purpose is called \emph{synthetic guarded +domain theory}, or SGDT for short. We provide a brief overview here, but more +details can be found in \cite{TODO}. - In particular, this axiom applies to propositions $P : \texttt{Prop}$. 
+SGDT offers a synthetic approach to domain theory that allows for guarded recursion +to be expressed syntactically via a type constructor $\later : \type \to \type$ +(pronounced ``later''). The use of a modality to express guarded recursion +was introduced by Nakano \cite{TODO}. +Given a type $A$, the type $\later A$ represents an element of type $A$ +that is available one time step later. There is an operator $\nxt : A \to\, \later A$ +that ``delays'' an element available now to make it available later. +We will use a tilde to denote a term of type $\later A$, e.g., $\tilde{M}$. +% TODO later is an applicative functor, but not a monad +There is a fixpoint operator +\[ + \fix : \forall T, (\later T \to T) \to T. +\] +That is, to construct a term of type $T$, it suffices to assume that we have access to +such a term ``later'' and use that to help us build a term ``now''. +This operator satisfies the axiom that $\fix f = f (\nxt (\fix f))$. +In particular, this axiom applies to propositions $P : \texttt{Prop}$; proving +a statement in this manner is known as $\lob$-induction. +In SGDT, there is also a new sort called \emph{clocks}. A clock serves as a reference +relative to which the constructions described above are carried out. +For instance, given a clock $k$ and type $T$, the type $\later^k T$ represents a value of type +$T$ one unit of time in the future according to clock $k$. +If we only ever had one clock, then we would not need to bother defining this notion. +However, the notion of \emph{clock quantification} is crucial for encoding coinductive types using guarded +recursion, an idea first introduced by Atkey and McBride \cite{TODO}. - \subsection{Overview of Remainder of Paper} - In Section \ref{sec:GTLC}, we introduce the gradually-typed cast calculus - for which we will prove graduality. Important here are the notions of syntactic - type precision and term precision. 
- In Section \ref{sec:domain-theory}, we define several fundamental constructions - internal to SGDT that will be needed when we give a denotational semantics to - the language. This includes the notion of Predomains as well as the concept - of EP-Pairs. +% Clocked Cubical Type Theory +\subsubsection{Ticked Cubical Type Theory} +% TODO motivation for Clocked Cubical Type Theory, e.g., delayed substitutions + +In Ticked Cubical Type Theory \cite{TODO}, there is an additional sort +called \emph{ticks}. Given a clock $k$, a tick $t : \tick k$ serves +as evidence that one unit of time has passed according to the clock $k$. +The type $\later A$ is represented as a function from ticks of a clock $k$ to $A$. +The type $A$ is allowed to depend on $t$, in which case we write $\later^k_t A$ +to emphasize the dependence. + +% TODO next as a function that ignores its input tick argument + +The rules for tick abstraction and application are similar to those of dependent +$\Pi$ types. +In particular, if we have a term $M$ of type $\later^k A$, and we +have available in the context a tick $t' : \tick k$, then we can apply the tick to $M$ to get +a term $M[t'] : A[t'/t]$. We will also write tick application as $M_t$. +Conversely, if in a context $\Gamma, t : \tick k$ we have that $M$ has type $A$, +then in context $\Gamma$ we have $\lambda t.M$ has type $\later A$. % TODO dependent version? + +The statements in this paper have been formalized in a variant of Agda called +Guarded Cubical Agda \cite{TODO}, an implementation of Clocked Cubical Type Theory. + + +% TODO axioms (clock irrelevance, tick irrelevance) + + +% TODO topos of trees model +\subsection{The Topos of Trees Model} + +The topos of trees $\calS$ is the presheaf category $\Set^{\omega^o}$. + + We assume a universe $\calU$ of types, with encodings for operations such + as sum types (written as $\widehat{+}$). 
There is also an operator + $\laterhat \colon \later \calU \to \calU$ such that + $\El(\laterhat(\nxt A)) =\,\, \later \El(A)$, where $\El$ is the type corresponding + to the code $A$. + + An object $X$ is a family $\{X_i\}$ of sets indexed by natural numbers, + along with restriction maps $r^X_i \colon X_{i+1} \to X_i$. + + A morphism from $\{X_i\}$ to $\{Y_i\}$ is a family of functions $f_i \colon X_i \to Y_i$ + that commute with the restriction maps in the obvious way, that is, + $f_i \circ r^X_i = r^Y_i \circ f_{i+1}$. + + The type operator $\later$ is defined on an object $X$ by + $(\later X)_0 = 1$ and $(\later X)_{i+1} = X_i$. + The restriction maps are given by $r^\later_0 =\, !$, where $!$ is the + unique map into $1$, and $r^\later_{i+1} = r^X_i$. + The morphism $\nxt^X \colon X \to \later X$ is defined pointwise by + $\nxt^X_0 =\, !$, and $\nxt^X_{i+1} = r^X_i$. + + + Given a morphism $f \colon \later X \to X$, we define $\fix f$ pointwise + as $\fix_i(f) = f_{i} \circ \dots \circ f_0$. + + % TODO global elements - In Section \ref{Semantics}, we define the denotational semantics for the gradually-typed - language \section{GTLC}\label{sec:GTLC} +Here we describe the syntax and typing for the gradually-typed lambda calculus. +We also give the rules for syntactic type and term precision. + +We start with the extensional lambda calculus $\extlc$, and then describe the additions +necessary for the intensional lambda calculus $\intlc$. + \subsection{Syntax} + +\begin{align*} + &\text{Types } A, B := \nat, \, \dyn, \, (A \To B) \\ + &\text{Terms } M, N := \err_A, \, \zro, \, \suc\, M, \, (\lda{x}{M}), \, (M\, N), \, (\up{A}{B} M), \, (\dn{A}{B} M) \\ + &\text{Contexts } \Gamma := \cdot, \, (\Gamma, x : A) +\end{align*} + +The typing rules are as expected, with a cast from $A$ to $B$ allowed only when $A \ltdyn B$.
+ +\begin{mathpar} + \inferrule*{ }{\hasty \Gamma {\err_A} A} + + \inferrule*{ }{\hasty \Gamma \zro \nat} + + \inferrule {\hasty \Gamma M \nat} {\hasty \Gamma {\suc\, M} \nat} + + \inferrule* + {\hasty {\Gamma, x : A} M B} + {\hasty \Gamma {\lda x M} {A \To B}} + + \inferrule * + {\hasty \Gamma M {A \To B} \and \hasty \Gamma N A} + {\hasty \Gamma {M \, N} B} + + \inferrule* + {A \ltdyn B \and \hasty \Gamma M A} + {\hasty \Gamma {\up A B M} B } + + \inferrule* + {A \ltdyn B \and \hasty \Gamma M B} + {\hasty \Gamma {\dn A B M} A} +\end{mathpar} + +The equational theory is also as expected, with $\beta$ and $\eta$ laws for function type. + \subsection{Type Precision} +The rules for type precision are as follows: + +\begin{mathpar} + \inferrule*[right = \dyn] + { }{\dyn \ltdyn \dyn} + + \inferrule*[right = \nat] + { }{\nat \ltdyn \nat} + + \inferrule*[right = $Inj_\nat$] + { }{\nat \ltdyn \dyn} + + \inferrule*[right = $\To$] + {A_i \ltdyn B_i \and A_o \ltdyn B_o } + {(A_i \To A_o) \ltdyn (B_i \To B_o)} + + \inferrule*[right=$Inj_{\To}$] + {(A_i \to A_o) \ltdyn (\dyn \To \dyn)} + {(A_i \to A_o) \ltdyn\, \dyn} + + +\end{mathpar} + +% Type precision derivations +Note that as a consequence of this presentation of the type precision rules, we +have that if $A \ltdyn B$, there is a unique precision derivation that witnesses this. +As in previous work, we go a step farther and make these derivations first-class objects, +known as \emph{type precision derivations}. +Specifically, for every $A \ltdyn B$, we have a derivation $d : A \ltdyn B$ that is constructed +using the rules above. For instance, there is a derivation $\dyn : \dyn \ltdyn \dyn$, and a derivation +$\nat : \nat \ltdyn \nat$, and if $d_i : A_i \ltdyn B_i$ and $d_o : A_o \ltdyn B_o$, then +there is a derivation $d_i \To d_o : (A_i \To A_o) \ltdyn (B_i \To B_o)$. Likewise for +the remaining two rules. The benefit to making these derivations explicit in the syntax is that we +can perform induction over them. 
+Note also that for any type $A$, we use $A$ to denote the reflexivity derivation that $A \ltdyn A$, +i.e., $A : A \ltdyn A$. +Finally, observe that for type precision derivations $d : A \ltdyn B$ and $d' : B \ltdyn C$, we +can define their composition $d' \circ d : A \ltdyn C$. This will be used in the statement +of transitivity of the term precision relation. + \subsection{Term Precision} +We allow for a \emph{heterogeneous} term precision judgment on terms $M$ of type $A$ and $N$ of type $B$, +provided that $A \ltdyn B$ holds. + +% Type precision contexts +In order to deal with open terms, we will need the notion of a type precision \emph{context}, which we denote +$\gamlt$. This is similar to a normal context but instead of mapping variables to types, +it maps variables $x$ to related types $A \ltdyn B$, where $x$ has type $A$ in the left-hand term +and $B$ in the right-hand term. We may also write $x : d$ where $d : A \ltdyn B$ to indicate this. +Another way of thinking of type precision contexts is as a zipped pair of contexts $\Gamma^l, \Gamma^r$ +with the same domain such that $\Gamma^l(x) \ltdyn \Gamma^r(x)$ for each $x$ in the domain. + +The rules for term precision come in two forms. We first have the \emph{congruence} rules, +one for each term constructor. These assert that the term constructors respect term precision. 
+The congruence rules are as follows: + + +\begin{mathpar} + \inferrule*[right = $\err$] + { \hasty \Gamma M A } + {\etmprec {\gamlt} {\err_A} M A} + + \inferrule*[right = Var] + { d : A \ltdyn B \and \gamlt(x) = (A, B) } { \etmprec {\gamlt} x x d } + + \inferrule*[right = Zro] + { } {\etmprec \gamlt \zro \zro \nat } + + \inferrule*[right = Suc] + { \etmprec \gamlt M N \nat } {\etmprec \gamlt {\suc\, M} {\suc\, N} \nat} + + \inferrule*[right = Lambda] + { d_i : A_i \ltdyn B_i \and + d_o : A_o \ltdyn B_o \and + \etmprec {\gamlt , x : d_i} M N {d_o} } + { \etmprec \gamlt {\lda x M} {\lda x N} (d_i \To d_o) } + + \inferrule*[right = App] + { d_i : A_i \ltdyn B_i \and + d_o : A_o \ltdyn B_o \\\\ + \etmprec \gamlt {M} {M'} (d_i \To d_o) \and + \etmprec \gamlt {N} {N'} d_i + } + { \etmprec \gamlt {M\, N} {M'\, N'} {d_o}} +\end{mathpar} + +We then have additional equational axioms, including transitivity, $\beta$ and $\eta$ laws, and +rules characterizing upcasts as least upper bounds, and downcasts as greatest lower bounds. + +We write $M \equidyn N$ to mean that both $M \ltdyn N$ and $N \ltdyn M$. 
+ +% TODO +\begin{mathpar} + \inferrule*[right = Transitivity] + {\etmprec \gamlt M N d \and + \etmprec \gamlt N P {d'} } + {\etmprec \gamlt M P {d' \circ d} } + + \inferrule*[right = $\beta$] + { \hasty {\Gamma, x : A_i} M {A_o} \and + \hasty {\Gamma} V {A_i} } + { \etmequidyn \gamlt {(\lda x M)\, V} {M[V/x]} {A_o} } + + \inferrule*[right = $\eta$] + { \hasty {\Gamma} {V} {A_i \To A_o} } + { \etmequidyn \gamlt {\lda x (V\, x)} V {A_i \To A_o} } + + \inferrule*[right = UpR] + { d : A \ltdyn B \and + \hasty \Gamma M A } + { \etmprec \gamlt M {\up A B M} d } + + \inferrule*[right = UpL] + { d : A \ltdyn B \and + \etmprec \gamlt M N d } + { \etmprec \gamlt {\up A B M} N B } + + \inferrule*[right = DnL] + { d : A \ltdyn B \and + \hasty \Gamma M B } + { \etmprec \gamlt {\dn A B M} M d } + + \inferrule*[right = DnR] + { d : A \ltdyn B \and + \etmprec \gamlt M N d } + { \etmprec \gamlt M {\dn A B N} A } +\end{mathpar} + +% TODO explain the least upper bound/greatest lower bound rules +The rules UpR, UpL, DnL, and DnR were introduced in \cite{TODO} as a means +of cleanly axiomatizing the intended behavior of casts in a way that +doesn't depend on the specific constructs of the language. +Intuitively, rule UpR says that the upcast of $M$ is an upper bound for $M$ +in that $M$ may error more, and UpL says that the upcast is the \emph{least} +such upper bound, in that it errors more than any other upper bound for $M$. +Conversely, DnL says that the downcast of $M$ is a lower bound, and DnR says +that it is the \emph{greatest} lower bound. +% These rules provide a clean axiomatization of the behavior of casts that doesn't +% depend on the specific constructs of the language. + + +\subsection{The Intensional Lambda Calculus} + +Now that we have described the syntax along with the type and term precision judgments for +$\extlc$, we can now do the same for $\intlc$. 
One key difference between the two calculi +is that we define $\intlc$ using the constructs available to us in the language of synthetic +guarded domain theory, e.g., we use the $\later$ operator. Whereas when we defined the syntax +of the extensional lambda calculus we were working in the category of sets, when we define the +syntax of the intensional lambda calculus we will be working in the topos of trees. +% in a presheaf category known as the \emph{topos of trees}. + +More specifically, in $\intlc$, we not only have normal terms, but also terms available +``later'', which we denote by $\tilde{M}$. +We have a term constructor $\theta_s$ which takes a later-term and turns it into +a term avaialble now. +The typing and precision rules for $\theta_s$ involve the $\later$ operator, as shown below. +Observe that $\theta_s$ is a syntactic analogue to the $\theta$ constructor of the lifting monad +that we will define in the section on domain theoretic constructions (Section \ref{sec:domain-theory}), +but it is important to note that $\theta_s (\tilde{M})$ is an actual term in $\intlc$, whereas the +$\theta$ constructor is a purely semantic construction. These will be connected when we discuss the +interpretation of $\intlc$ into the semantic model. + +% Most aspects of the two calculi are the same, except that in $\intlc$ we add a new term constructor +% $\theta_s$ which represents a syntactic way of delaying a term by one computation step. + +To better understand this situation, note that $\lda{x}{(\cdot)}$ can be viewed as a function +(at the level of the ambient type theory) from terms of type $B$ under context $\Gamma, x : A$ +to terms of type $A \To B$ under context $\Gamma$. Similarly, we can view $\theta_s(\cdot)$ as a +function (in the ambient type theory, which is sythetic guarded domain theory) taking terms +$\tilde{M}$ of type $A$ avaiable \emph{later} to terms of type $A$ available \emph{now}. 
+ +Notice that the notion of a ``term available later'' does not need to be part of the syntax +of the intensional lambda calculus, because this can be expressed in the ambient theory. +Similarly, we do not need a syntactic form to delay a term, because we can simply use $\nxt$. + + +\begin{align*} + % &\text{Later Terms } \tilde{M}, \tilde{N} + &\text{Terms } M, N := \err_A, \, \dots \, \theta_s (\tilde{M}) \\ +\end{align*} + +\begin{mathpar} + \inferrule*[] + { \later_t (\hasty \Gamma {M_t} A) } + { \hasty \Gamma {\theta_s M} {A} } +\end{mathpar} + +\begin{mathpar} + \inferrule*[] + { \later_t (\itmprec \gamlt {M_t} {N_t} d) } + { \itmprec \gamlt {\theta_s M} {\theta_s N} d } +\end{mathpar} + +Recall that $\later_t$ is a dependent form of $\later$ where the arugment is allowed +to mention $t$. In particular, here we apply the tick $t$ to the later-terms $M$ and $N$ to get +``now"-terms $M_t$ and $N_t$. + +Formally speaking, the term precision relation must be defined as a guarded fixpoint, +i.e., we assume that the function is defined later, and use it to construct a definition +that is defined ``now''. This involves applying a tick to the later-function to shift it +to a now-function. Indeed, this is what we do in the formal Agda development, but in this +paper, we will elide these issues as they are not relevant to the main ideas. + +% TODO equational theory + + + \section{Domain-Theoretic Constructions}\label{sec:domain-theory} +In this section, we discuss the fundamental objects of the model into which we will embed +the intensional lambda calculus and inequational theory. It is important to remember that +the constructions in this section are entirely independent of the syntax described in the +previous section; the notions defined here exist in their own right as purely mathematical +constructs. In the next section, we will link the syntax and semantics via an interpretation +function. 
+ +\subsection{The Lift Monad} + +When thinking about how to model intensional gradually-typed programs, we must consider +their possible behaviors. On the one hand, we have \emph{failure}: a program may fail +at run-time because of a type error. In addition to this, a program may ``think'', +i.e., take a step of computation. If a program thinks forever, then it never returns a value, +so we can think of the idea of thinking as a way of intensionally modelling \emph{partiality}. + +With this in mind, we can describe a semantic object that models these behaviors: a monad +for embedding computations that has cases for failure and ``thinking''. +Previous work has studied such a construct in the setting of the latter only, called the lift +monad \cite{TODO}; here, we add the additional effect of failure. + +For a type $A$, we define the \emph{lift monad with failure} $\li A$, which we will just call +the \emph{lift monad}, as the following datatype: + +\begin{align*} + \li A &:= \\ + &\eta \colon A \to \li A \\ + &\mho \colon \li A \\ + &\theta \colon \later (\li A) \to \li A +\end{align*} + +Formally, the lift monad $\li A$ is defined as the solution to the guarded recursive type equation + +\[ \li A \cong A + 1 + \later \li A. \] + +An element of $\li A$ should be viewed as a computation that can either (1) return a value (via $\eta$), +(2) raise an error and stop (via $\mho$), or (3) think for a step (via $\theta$). + +Notice there is a computation $\fix \theta$ of type $\li A$. This represents a computation +that thinks forever and never returns a value. + +Since we claimed that $\li A$ is a monad, we need to define the monadic operations +and show that they repect the monadic laws. The return is just $\eta$, and extend +is defined via by guarded recursion by cases on the input. 
+% It is instructive to give at least one example of a use of guarded recursion, so +% we show below how to define extend: +% TODO +% +% +Verifying that the monadic laws hold requires \lob-induction and is straightforward. + +The lift monad has the following universal property. Let $f$ be a function from $A$ to $B$, +where $B$ is a $\later$-algebra, i.e., there is $\theta_B \colon \later B \to B$. +Further suppose that $B$ is also an ``error-algebra'', that is, an algebra of the +constant functor $1 \colon \text{Type} \to \text{Type}$ mapping all types to Unit. +This latter statement amounts to saying that there is a map $\text{Unit} \to B$, so $B$ has a +distinguished ``error element" $\mho_B \colon B$. + +Then there is a unique homomorphism of algebras $f' \colon \li A \to B$ such that +$f' \circ \eta = f$. The function $f'(l)$ is defined via guarded fixpoint by cases on $l$. +In the $\mho$ case, we simply return $\mho_B$. +In the $\theta(\tilde{l})$ case, we will return + +\[\theta_B (\lambda t . (f'_t \, \tilde{l}_t)). \] + +Recalling that $f'$ is a guaded fixpoint, it is available ``later'' and by +applying the tick we get a function we can apply ``now''; for the argument, +we apply the tick to $\tilde{l}$ to get a term of type $\li A$. + + +\subsubsection{Model-Theoretic Description} +We can describe the lift monad in the topos of trees model as follows. + + \subsection{Predomains} +The next important construction is that of a \emph{predomain}. A predomain is intended to +model the notion of error ordering that we want terms to have. Thus, we define a predomain $A$ +as a partially-ordered set, which consists of a type which we denote $\ty{A}$ and a reflexive, +transitive, and antisymmetric relation $\le_P$ on $A$. + +For each type we want to represent, we define a predomain for the corresponding semantic +type. 
For instance, we define a predomain for natural numbers, a predomain for the +dynamic type, a predomain for functions, and a predomain for the lift monad. We +describe each of these below. + +We define monotone functions between predomain as expected. Given predomains +$A$ and $B$, we write $f \colon A \monto B$ to indicate that $f$ is a monotone +function from $A$ to $B$, i.e, for all $a_1 \le_A a_2$, we have $f(a_1) \le_B f(a_2)$. + +% TODO +\begin{itemize} + \item There is a predomain Nat for natural numbers, where the ordering is equality. + + \item There is a predomain Dyn to represent the dynamic type. The underlying type + for this predomain is defined to be $\mathbb{N} + \later (Dyn \monto Dyn)$. + This definition is valid because the occurrences of Dyn are guarded by the $\later$. + The ordering is defined via guarded recursion by cases on the argument, using the + ordering on $\mathbb{N}$ and the ordering on monotone functions described below. + + \item For a predomain $A$, there is a predomain $\li A$ that is the ``lift'' of $A$ + using the lift monad. We use the same notation for $\li A$ when $A$ is a type + and $A$ is a predomain, since the context should make clear which one we are referring to. + The underling type of $\li A$ is simply $\li \ty{A}$, i.e., the lift of the underlying + type of $A$. + The ordering of $\li A$ is the ``lock-step error-ordering'' which we describe in \ref{subsec:lock-step}. + + \item For predomains $A_i$ and $A_o$, we form the predomain of monotone functions + from $A_i$ to $A_o$, which we denote by $A_i \To A_o$. + Two such functions are related +\end{itemize} + +Predomains will form the objects of a structure similar to a double category, +with horizontal morphisms being monotone funcitons, and vertical morphisms being +embedding-projection pairs discussed below. 
+ + + +\subsection{Lock-step Error Ordering}\label{subsec:lock-step} + +As mentioned, the ordering on the lift of a predomain $A$ +The relation is parameterized by an ordering $\le_A$ on $A$. +We call this the lock-step error-ordering, the idea being that two computations +$l$ and $l'$ are related if they are in lock-step with regard to their +intensional behavior. That is, if $l$ is $\eta x$, then $l'$ should be equal to $\eta y$ +for some $y$ such that $x \le_A y$. + \subsection{Weak Bisimilarity Relation} + + +\subsection{Combined Ordering} + + + + + + + \subsection{EP-Pairs} +The use of embedding-projection pairs in gradual typing was introduced by New and +Ahmed \cite{TODO}. +Here, we want to adapt this notion of embedding-projection pair to the setting +of intensional denotaional semantics. + + + + \section{Semantics}\label{sec:semantics} \subsection{Types as Predomains} + \subsection{Terms as Monotone Functions} + \subsection{Type Precision as EP-Pairs} -\subsection{Term Precision} + +\subsection{Term Precision via the Lock-Step Error Ordering} % Homogeneous vs heterogeneous term precision \section{Graduality}\label{sec:graduality} +\subsection{Outline} + +\subsection{Extensional to Intensional} + +\subsection{Intensional Results} + +\subsection{Adequacy} + +\subsection{Putting it Together} + \section{Discussion}\label{sec:discussion} @@ -239,4 +835,8 @@ It is possible that a combination of synthetic guarded domain theory with \emph{directed} type theory would allow for an a synthetic treatment of the error ordering as well. 
+ +\bibliographystyle{ACM-Reference-Format} +\bibliography{references} + \end{document} \ No newline at end of file diff --git a/paper-new/references.bib b/paper-new/references.bib new file mode 100644 index 0000000000000000000000000000000000000000..d02b5c2445fb47ed51f174479fcd4286c5a7ec6a --- /dev/null +++ b/paper-new/references.bib @@ -0,0 +1,331 @@ +@article{Mannaa2020TickingCA, + title={Ticking clocks as dependent right adjoints: Denotational semantics for clocked type theory}, + author={Bassel Mannaa and Rasmus Ejlers M{\o}gelberg and Niccol{\`o} Veltri}, + journal={Log. Methods Comput. Sci.}, + year={2020}, + volume={16} +} + +@inproceedings{mogelberg-paviotti2016, +author = {M\o{}gelberg, Rasmus Ejlers and Paviotti, Marco}, +title = {Denotational Semantics of Recursive Types in Synthetic Guarded Domain Theory}, +year = {2016}, +isbn = {9781450343916}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/2933575.2934516}, +doi = {10.1145/2933575.2934516}, +abstract = {Guarded recursion is a form of recursion where recursive calls are guarded by delay modalities. Previous work has shown how guarded recursion is useful for reasoning operationally about programming languages with advanced features including general references, recursive types, countable non-determinism and concurrency.Guarded recursion also offers a way of adding recursion to type theory while maintaining logical consistency. In previous work we initiated a programme of denotational semantics in type theory using guarded recursion, by constructing a computationally adequate model of the language PCF (simply typed lambda calculus with fixed points). 
This model was intensional in that it could distinguish between computations computing the same result using a different number of fixed point unfoldings.In this work we show how also programming languages with recursive types can be given denotational semantics in type theory with guarded recursion. More precisely, we give a computationally adequate denotational semantics to the language FPC (simply typed lambda calculus extended with recursive types), modelling recursive types using guarded recursive types. The model is intensional in the same way as was the case in previous work, but we show how to recover extensionality using a logical relation.All constructions and reasoning in this paper, including proofs of theorems such as soundness and adequacy, are by (informal) reasoning in type theory, often using guarded recursion.}, +booktitle = {Proceedings of the 31st Annual ACM/IEEE Symposium on Logic in Computer Science}, +pages = {317-326}, +numpages = {10}, +keywords = {Synthetic Domain Theory, Denotational Semantics, Guarded Recursion, Type Theory, Recursive Types}, +location = {New York, NY, USA}, +series = {LICS '16} +} + +@inproceedings{veltri-vezzosi2020, +author = {Veltri, Niccol\`{o} and Vezzosi, Andrea}, +title = {Formalizing ðœ‹-Calculus in Guarded Cubical Agda}, +year = {2020}, +isbn = {9781450370974}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/3372885.3373814}, +doi = {10.1145/3372885.3373814}, +abstract = {Dependent type theories with guarded recursion have shown themselves suitable for the development of denotational semantics of programming languages. In particular Ticked Cubical Type Theory (TCTT) has been used to show that for guarded labelled transition systems (GLTS) interpretation into the denotational semantics maps bisimilar processes to equal values. In fact the two notions are proved equivalent, allowing one to reason about equality in place of bisimilarity. 
We extend that result to the Ï€-calculus, picking early congruence as the syntactic notion of equivalence between processes, showing that denotational models based on guarded recursive types can handle the dynamic creation of channels that goes beyond the scope of GLTSs. Hence we present a fully abstract denotational model for the early Ï€-calculus, formalized as an extended example for Guarded Cubical Agda: a novel implementation of Ticked Cubical Type Theory based on Cubical Agda.}, +booktitle = {Proceedings of the 9th ACM SIGPLAN International Conference on Certified Programs and Proofs}, +pages = {270-283}, +numpages = {14}, +keywords = {pi-calculus, ticked cubical type theory, denotational semantics, guarded recursion}, +location = {New Orleans, LA, USA}, +series = {CPP 2020} +} + + +@INPROCEEDINGS{birkedal-mogelberg-schwinghammer-stovring2011, + author={Birkedal, Lars and Mogelberg, Rasmus Ejlers and Schwinghammer, Jan and Stovring, Kristian}, + booktitle={2011 IEEE 26th Annual Symposium on Logic in Computer Science}, + title={First Steps in Synthetic Guarded Domain Theory: Step-Indexing in the Topos of Trees}, + year={2011}, + volume={}, + number={}, + pages={55-64}, + doi={10.1109/LICS.2011.16}} + + +@INPROCEEDINGS{bahr-grathwohl-bugge-mogelberg2017, + author={Bahr, Patrick and Grathwohl, Hans Bugge and Møgelberg, Rasmus Ejlers}, + booktitle={2017 32nd Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)}, + title={The clocks are ticking: No more delays!}, + year={2017}, + volume={}, + number={}, + pages={1-12}, + doi={10.1109/LICS.2017.8005097}} + + +@inproceedings{atkey-mcbride2013, +author = {Atkey, Robert and McBride, Conor}, +title = {Productive Coprogramming with Guarded Recursion}, +year = {2013}, +isbn = {9781450323260}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/2500365.2500597}, +doi = {10.1145/2500365.2500597}, +abstract = {Total functional programming offers the 
beguiling vision that, just by virtue of the compiler accepting a program, we are guaranteed that it will always terminate. In the case of programs that are not intended to terminate, e.g., servers, we are guaranteed that programs will always be productive. Productivity means that, even if a program generates an infinite amount of data, each piece will be generated in finite time. The theoretical underpinning for productive programming with infinite output is provided by the category theoretic notion of final coalgebras. Hence, we speak of coprogramming with non-well-founded codata, as a dual to programming with well-founded data like finite lists and trees.Systems that offer facilities for productive coprogramming, such as the proof assistants Coq and Agda, currently do so through syntactic guardedness checkers. Syntactic guardedness checkers ensure that all self-recursive calls are guarded by a use of a constructor. Such a check ensures productivity. Unfortunately, these syntactic checks are not compositional, and severely complicate coprogramming.Guarded recursion, originally due to Nakano, is tantalising as a basis for a flexible and compositional type-based approach to coprogramming. However, as we show, by itself, guarded recursion is not suitable for coprogramming due to the fact that there is no way to make finite observations on pieces of infinite data. In this paper, we introduce the concept of clock variables that index Nakano's guarded recursion. 
Clock variables allow us to "close over" the generation of infinite data, and to make finite observations, something that is not possible with guarded recursion alone.}, +booktitle = {Proceedings of the 18th ACM SIGPLAN International Conference on Functional Programming}, +pages = {197-208}, +numpages = {12}, +keywords = {total functional programming, guarded recursion, coalgebras, corecursion}, +location = {Boston, Massachusetts, USA}, +series = {ICFP '13} +} + +@article{10.1145/2544174.2500597, +author = {Atkey, Robert and McBride, Conor}, +title = {Productive Coprogramming with Guarded Recursion}, +year = {2013}, +issue_date = {September 2013}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +volume = {48}, +number = {9}, +issn = {0362-1340}, +url = {https://doi.org/10.1145/2544174.2500597}, +doi = {10.1145/2544174.2500597}, +abstract = {Total functional programming offers the beguiling vision that, just by virtue of the compiler accepting a program, we are guaranteed that it will always terminate. In the case of programs that are not intended to terminate, e.g., servers, we are guaranteed that programs will always be productive. Productivity means that, even if a program generates an infinite amount of data, each piece will be generated in finite time. The theoretical underpinning for productive programming with infinite output is provided by the category theoretic notion of final coalgebras. Hence, we speak of coprogramming with non-well-founded codata, as a dual to programming with well-founded data like finite lists and trees.Systems that offer facilities for productive coprogramming, such as the proof assistants Coq and Agda, currently do so through syntactic guardedness checkers. Syntactic guardedness checkers ensure that all self-recursive calls are guarded by a use of a constructor. Such a check ensures productivity. 
Unfortunately, these syntactic checks are not compositional, and severely complicate coprogramming.Guarded recursion, originally due to Nakano, is tantalising as a basis for a flexible and compositional type-based approach to coprogramming. However, as we show, by itself, guarded recursion is not suitable for coprogramming due to the fact that there is no way to make finite observations on pieces of infinite data. In this paper, we introduce the concept of clock variables that index Nakano's guarded recursion. Clock variables allow us to "close over" the generation of infinite data, and to make finite observations, something that is not possible with guarded recursion alone.}, +journal = {SIGPLAN Not.}, +month = {sep}, +pages = {197-208}, +numpages = {12}, +keywords = {guarded recursion, coalgebras, total functional programming, corecursion} +} + + +@inproceedings{kristensen-mogelberg-vezzosi2022, +author = {Baunsgaard Kristensen, Magnus and Mogelberg, Rasmus Ejlers and Vezzosi, Andrea}, +title = {Greatest HITs: Higher Inductive Types in Coinductive Definitions via Induction under Clocks}, +year = {2022}, +isbn = {9781450393515}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/3531130.3533359}, +doi = {10.1145/3531130.3533359}, +abstract = {We present Clocked Cubical Type Theory, the first type theory combining multi-clocked guarded recursion with the features of Cubical Type Theory. Guarded recursion is an abstract form of step-indexing, which can be used for construction of advanced programming language models. In its multi-clocked version, it can also be used for coinductive programming and reasoning, encoding productivity in types. Combining this with Higher Inductive Types (HITs) the encoding extends to coinductive types that are traditionally hard to represent in type theory, such as the type of finitely branching labelled transition systems. 
Among our technical contributions is a new principle of induction under clocks, providing computational content to one of the main axioms required for encoding coinductive types. This principle is verified using a denotational semantics in a presheaf model.}, +booktitle = {Proceedings of the 37th Annual ACM/IEEE Symposium on Logic in Computer Science}, +articleno = {42}, +numpages = {13}, +location = {Haifa, Israel}, +series = {LICS '22} +} + + +@article{mogelberg-veltri2019, +author = {M\o{}gelberg, Rasmus Ejlers and Veltri, Niccol\`{o}}, +title = {Bisimulation as Path Type for Guarded Recursive Types}, +year = {2019}, +issue_date = {January 2019}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +volume = {3}, +number = {POPL}, +url = {https://doi.org/10.1145/3290317}, +doi = {10.1145/3290317}, +abstract = {In type theory, coinductive types are used to represent processes, and are thus crucial for the formal verification of non-terminating reactive programs in proof assistants based on type theory, such as Coq and Agda. Currently, programming and reasoning about coinductive types is difficult for two reasons: The need for recursive definitions to be productive, and the lack of coincidence of the built-in identity types and the important notion of bisimilarity. Guarded recursion in the sense of Nakano has recently been suggested as a possible approach to dealing with the problem of productivity, allowing this to be encoded in types. Indeed, coinductive types can be encoded using a combination of guarded recursion and universal quantification over clocks. This paper studies the notion of bisimilarity for guarded recursive types in Ticked Cubical Type Theory, an extension of Cubical Type Theory with guarded recursion. 
We prove that, for any functor, an abstract, category theoretic notion of bisimilarity for the final guarded coalgebra is equivalent (in the sense of homotopy type theory) to path equality (the primitive notion of equality in cubical type theory). As a worked example we study a guarded notion of labelled transition systems, and show that, as a special case of the general theorem, path equality coincides with an adaptation of the usual notion of bisimulation for processes. In particular, this implies that guarded recursion can be used to give simple equational reasoning proofs of bisimilarity. This work should be seen as a step towards obtaining bisimilarity as path equality for coinductive types using the encodings mentioned above.}, +journal = {Proc. ACM Program. Lang.}, +month = {jan}, +articleno = {4}, +numpages = {29}, +keywords = {guarded recursion, labelled transition systems, bisimulation, homotopy type theory, coinductive types, Dependent types, cubical type theory, CCS} +} + + +@article{new-ahmed2018, +author = {New, Max S. and Ahmed, Amal}, +title = {Graduality from Embedding-Projection Pairs}, +year = {2018}, +issue_date = {September 2018}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +volume = {2}, +number = {ICFP}, +url = {https://doi.org/10.1145/3236768}, +doi = {10.1145/3236768}, +abstract = {Gradually typed languages allow statically typed and dynamically typed code to interact while maintaining benefits of both styles. The key to reasoning about these mixed programs is Siek-Vitousek-Cimini-Boyland’s (dynamic) gradual guarantee, which says that giving components of a program more precise types only adds runtime type checking, and does not otherwise change behavior. In this paper, we give a semantic reformulation of the gradual guarantee called graduality. We change the name to promote the analogy that graduality is to gradual typing what parametricity is to polymorphism. 
Each gives a local-to-global, syntactic-to-semantic reasoning principle that is formulated in terms of a kind of observational approximation. Utilizing the analogy, we develop a novel logical relation for proving graduality. We show that embedding-projection pairs (ep pairs) are to graduality what relations are to parametricity. We argue that casts between two types where one is “more dynamic†(less precise) than the other necessarily form an ep pair, and we use this to cleanly prove the graduality cases for casts from the ep-pair property. To construct ep pairs, we give an analysis of the type dynamism relation—also known as type precision or na\"{\i}ve subtyping—that interprets the rules for type dynamism as compositional constructions on ep pairs, analogous to the coercion interpretation of subtyping.}, +journal = {Proc. ACM Program. Lang.}, +month = {jul}, +articleno = {73}, +numpages = {30}, +keywords = {logical relations, gradual typing, observational error approximation, dynamic gradual guarantee} +} + + +@article{new-licata-ahmed2019, +author = {New, Max S. and Licata, Daniel R. and Ahmed, Amal}, +title = {Gradual Type Theory}, +year = {2019}, +issue_date = {January 2019}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +volume = {3}, +number = {POPL}, +url = {https://doi.org/10.1145/3290328}, +doi = {10.1145/3290328}, +abstract = {Gradually typed languages are designed to support both dynamically typed and statically typed programming styles while preserving the benefits of each. While existing gradual type soundness theorems for these languages aim to show that type-based reasoning is preserved when moving from the fully static setting to a gradual one, these theorems do not imply that correctness of type-based refactorings and optimizations is preserved. 
Establishing correctness of program transformations is technically difficult, because it requires reasoning about program equivalence, and is often neglected in the metatheory of gradual languages. In this paper, we propose an axiomatic account of program equivalence in a gradual cast calculus, which we formalize in a logic we call gradual type theory (GTT). Based on Levy’s call-by-push-value, GTT gives an axiomatic account of both call-by-value and call-by-name gradual languages. Based on our axiomatic account we prove many theorems that justify optimizations and refactorings in gradually typed languages. For example, uniqueness principles for gradual type connectives show that if the βη laws hold for a connective, then casts between that connective must be equivalent to the so-called “lazy†cast semantics. Contrapositively, this shows that “eager†cast semantics violates the extensionality of function types. As another example, we show that gradual upcasts are pure functions and, dually, gradual downcasts are strict functions. We show the consistency and applicability of our axiomatic theory by proving that a contract-based implementation using the lazy cast semantics gives a logical relations model of our type theory, where equivalence in GTT implies contextual equivalence of the programs. Since GTT also axiomatizes the dynamic gradual guarantee, our model also establishes this central theorem of gradual typing. The model is parametrized by the implementation of the dynamic types, and so gives a family of implementations that validate type-based optimization and the gradual guarantee.}, +journal = {Proc. ACM Program. Lang.}, +month = {jan}, +articleno = {15}, +numpages = {31}, +keywords = {gradual typing, call-by-push-value, graduality} +} + + +@InProceedings{siek_et_al:LIPIcs:2015:5031, + author = {Jeremy G. Siek and Michael M. 
Vitousek and Matteo Cimini and John Tang Boyland}, + title = {{Refined Criteria for Gradual Typing}}, + booktitle = {1st Summit on Advances in Programming Languages (SNAPL 2015)}, + pages = {274--293}, + series = {Leibniz International Proceedings in Informatics (LIPIcs)}, + ISBN = {978-3-939897-80-4}, + ISSN = {1868-8969}, + year = {2015}, + volume = {32}, + editor = {Thomas Ball and Rastislav Bodik and Shriram Krishnamurthi and Benjamin S. Lerner and Greg Morrisett}, + publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik}, + address = {Dagstuhl, Germany}, + URL = {http://drops.dagstuhl.de/opus/volltexte/2015/5031}, + URN = {urn:nbn:de:0030-drops-50312}, + doi = {10.4230/LIPIcs.SNAPL.2015.274}, + annote = {Keywords: gradual typing, type systems, semantics, dynamic languages} +} + + +@inproceedings{garcia-clark-tanter2016, +author = {Garcia, Ronald and Clark, Alison M. and Tanter, \'{E}ric}, +title = {Abstracting Gradual Typing}, +year = {2016}, +isbn = {9781450335492}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/2837614.2837670}, +doi = {10.1145/2837614.2837670}, +abstract = {Language researchers and designers have extended a wide variety of type systems to support gradual typing, which enables languages to seamlessly combine dynamic and static checking. These efforts consistently demonstrate that designing a satisfactory gradual counterpart to a static type system is challenging, and this challenge only increases with the sophistication of the type system. Gradual type system designers need more formal tools to help them conceptualize, structure, and evaluate their designs. In this paper, we propose a new formal foundation for gradual typing, drawing on principles from abstract interpretation to give gradual types a semantics in terms of pre-existing static types. 
Abstracting Gradual Typing (AGT for short) yields a formal account of consistency---one of the cornerstones of the gradual typing approach---that subsumes existing notions of consistency, which were developed through intuition and ad hoc reasoning. Given a syntax-directed static typing judgment, the AGT approach induces a corresponding gradual typing judgment. Then the type safety proof for the underlying static discipline induces a dynamic semantics for gradual programs defined over source-language typing derivations. The AGT approach does not resort to an externally justified cast calculus: instead, run-time checks naturally arise by deducing evidence for consistent judgments during proof reduction. To illustrate the approach, we develop a novel gradually-typed counterpart for a language with record subtyping. Gradual languages designed with the AGT approach satisfy by construction the refined criteria for gradual typing set forth by Siek and colleagues.}, +booktitle = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, +pages = {429-442}, +numpages = {14}, +keywords = {abstract interpretation, gradual typing, subtyping}, +location = {St. Petersburg, FL, USA}, +series = {POPL '16} +} + +@article{10.1145/2914770.2837670, +author = {Garcia, Ronald and Clark, Alison M. and Tanter, \'{E}ric}, +title = {Abstracting Gradual Typing}, +year = {2016}, +issue_date = {January 2016}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +volume = {51}, +number = {1}, +issn = {0362-1340}, +url = {https://doi.org/10.1145/2914770.2837670}, +doi = {10.1145/2914770.2837670}, +abstract = {Language researchers and designers have extended a wide variety of type systems to support gradual typing, which enables languages to seamlessly combine dynamic and static checking. 
These efforts consistently demonstrate that designing a satisfactory gradual counterpart to a static type system is challenging, and this challenge only increases with the sophistication of the type system. Gradual type system designers need more formal tools to help them conceptualize, structure, and evaluate their designs. In this paper, we propose a new formal foundation for gradual typing, drawing on principles from abstract interpretation to give gradual types a semantics in terms of pre-existing static types. Abstracting Gradual Typing (AGT for short) yields a formal account of consistency---one of the cornerstones of the gradual typing approach---that subsumes existing notions of consistency, which were developed through intuition and ad hoc reasoning. Given a syntax-directed static typing judgment, the AGT approach induces a corresponding gradual typing judgment. Then the type safety proof for the underlying static discipline induces a dynamic semantics for gradual programs defined over source-language typing derivations. The AGT approach does not resort to an externally justified cast calculus: instead, run-time checks naturally arise by deducing evidence for consistent judgments during proof reduction. To illustrate the approach, we develop a novel gradually-typed counterpart for a language with record subtyping. 
Gradual languages designed with the AGT approach satisfy by construction the refined criteria for gradual typing set forth by Siek and colleagues.}, +journal = {SIGPLAN Not.}, +month = {jan}, +pages = {429--442}, +numpages = {14}, +keywords = {subtyping, gradual typing, abstract interpretation} +} + + +@inproceedings{cimini-siek2016, +author = {Cimini, Matteo and Siek, Jeremy G.}, +title = {The Gradualizer: A Methodology and Algorithm for Generating Gradual Type Systems}, +year = {2016}, +isbn = {9781450335492}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +url = {https://doi.org/10.1145/2837614.2837632}, +doi = {10.1145/2837614.2837632}, +abstract = {Many languages are beginning to integrate dynamic and static typing. Siek and Taha offered gradual typing as an approach to this integration that provides a coherent and full-span migration between the two disciplines. However, the literature lacks a general methodology for designing gradually typed languages. Our first contribution is to provide a methodology for deriving the gradual type system and the compilation to the cast calculus. Based on this methodology, we present the Gradualizer, an algorithm that generates a gradual type system from a well-formed type system and also generates a compiler to the cast calculus. Our algorithm handles a large class of type systems and generates systems that are correct with respect to the formal criteria of gradual typing. We also report on an implementation of the Gradualizer that takes a type system expressed in lambda-prolog and outputs its gradually typed version and a compiler to the cast calculus in lambda-prolog.}, +booktitle = {Proceedings of the 43rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, +pages = {443--455}, +numpages = {13}, +keywords = {type systems, methodology, semantics, gradual typing}, +location = {St.
Petersburg, FL, USA}, +series = {POPL '16} +} + +@article{10.1145/2914770.2837632, +author = {Cimini, Matteo and Siek, Jeremy G.}, +title = {The Gradualizer: A Methodology and Algorithm for Generating Gradual Type Systems}, +year = {2016}, +issue_date = {January 2016}, +publisher = {Association for Computing Machinery}, +address = {New York, NY, USA}, +volume = {51}, +number = {1}, +issn = {0362-1340}, +url = {https://doi.org/10.1145/2914770.2837632}, +doi = {10.1145/2914770.2837632}, +abstract = {Many languages are beginning to integrate dynamic and static typing. Siek and Taha offered gradual typing as an approach to this integration that provides a coherent and full-span migration between the two disciplines. However, the literature lacks a general methodology for designing gradually typed languages. Our first contribution is to provide a methodology for deriving the gradual type system and the compilation to the cast calculus. Based on this methodology, we present the Gradualizer, an algorithm that generates a gradual type system from a well-formed type system and also generates a compiler to the cast calculus. Our algorithm handles a large class of type systems and generates systems that are correct with respect to the formal criteria of gradual typing. 
We also report on an implementation of the Gradualizer that takes a type system expressed in lambda-prolog and outputs its gradually typed version and a compiler to the cast calculus in lambda-prolog.}, +journal = {SIGPLAN Not.}, +month = {jan}, +pages = {443--455}, +numpages = {13}, +keywords = {gradual typing, methodology, semantics, type systems} +} + + + + + + + + + +@inproceedings{wadler-findler09, + author = "Philip Wadler and Robert Bruce Findler", + title = "Well-typed programs can't be blamed", + booktitle = esop, + year = 2009, + month = mar, + pages = "1--16", + location = "York, UK" +} + +@inproceedings{tobin-hochstadt06, + author = "Sam Tobin-Hochstadt and Matthias Felleisen", + title = "Interlanguage Migration: From Scripts to Programs", + booktitle = dls, + month = oct, + pages = "964--974", + year = 2006, +} + +@inproceedings{tobin-hochstadt08, + author = {Sam Tobin-Hochstadt and Matthias Felleisen}, + title = {The Design and Implementation of Typed Scheme}, + booktitle = popl08, + year = {2008}} + +@inproceedings{siek-taha06, + author = "Jeremy G. Siek and Walid Taha", + title = "Gradual Typing for Functional Languages", + booktitle = scheme, + month = sep, + pages = "81--92", + year = 2006 +} + +@inproceedings{findler-felleisen02, +author = {Robert Bruce Findler and Matthias Felleisen}, +title = {Contracts for higher-order functions}, +booktitle = icfp, +year = {2002}, +month = sep, +pages = {48--59} +}