\documentclass[11pt]{article}
% \documentclass[11pt,twoside]{article}

\usepackage{lecnotes}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{comment}
\input{fp-macros}

\newcommand{\lecdate}{September 1, 2016} % e.g. January 12, 2010
\newcommand{\lecnum}{2}           % e.g. 1
\newcommand{\lectitle}{From Rules to Propositions}         % e.g. Judgments and Propositions
\newcommand{\lecturer}{Frank Pfenning}         % e.g. Frank Pfenning

\begin{document}

\maketitle

\noindent
We review the ideas of ephemeral truth and linear inference with
another example from graph theory: constructing spanning trees for
graphs.  Next we consider how the process of grammatical inference in
the Lambek calculus~\cite{Lambek58}, where an order of facts is
prescribed, can be used to define other forms of computation.
Finally, we generalize our formalization of logical inference to
encompass \emph{hypothetical reasoning} which will give rise to
Gentzen's sequent calculus~\cite{Gentzen35}.  The sequent calculus
will be the gateway that allows us to move from pure logical inference
to a definition of logical connectives.

\section{Example: Spanning Trees}
\label{sec:spanning-trees}

A \emph{spanning tree} for a connected graph is a graph that has the
same nodes but only a subset of the edges such that there is no cycle.
In order to define rules for constructing a spanning tree for a graph
we will simultaneously manipulate two graphs: the original graph and
its spanning tree.  We therefore add a third argument to our
representation of graphs (from
\href{http://www.cs.cmu.edu/~fp/courses/15816-f16/lectures/01-inference.pdf}{Lecture
  1}) which identifies \emph{which} graph a node or edge
belongs to.
\[
\begin{tabular}{ll}
$\m{node}(x,g)$ & $x$ is a node in graph $g$ \\
$\m{edge}(x,y,g)$ & there is an edge from $x$ to $y$ in graph $g$
\end{tabular}
\]
The rule of symmetry stays within one graph $g$:
\[
\infer[\m{sym}]
  {\m{edge}(y,x,g)}
  {\m{edge}(x,y,g)}
\]
Now assume we have a graph $\m{g}$ and want to build a spanning tree
$\m{t}$.  Here is a simple algorithm for building $\m{t}$.  We begin
by picking an arbitrary node $x$ from $\m{g}$ and create $\m{t}$ with
$x$ as its only node.  Now we repeatedly pick an edge that connects a
node $x$ already in the tree with a node $y$ not yet in the tree and
add that edge and the node $y$ into the tree.  When no such edges
exist any more, we either must have a spanning tree already or the
original graph was not connected.  We can determine this, for example,
by checking if there are any nodes left in the graph that haven't
been added to the tree.

This algorithm has two kinds of steps, so its representation in
linear logic has two rules.  The first step moves an arbitrary
node from the graph to the tree.
\[
\infer[\m{start}?]
  {\m{node}(x,\m{t})}
  {\m{node}(x,\m{g})}
\]
This rule can be used only once, at the very beginning of the
algorithm and must be prohibited afterwards, or we could just use it
to move all nodes from the graph to the tree without moving any edges.
So we can either say the rule must be ephemeral itself, or we create a
new ephemeral proposition $\m{init}$ which only exists in the initial
state and is consumed by the first step.
\[
\infer[\m{start}]
  {\m{node}(x,\m{t})}
  {\m{init} \quad \m{node}(x,\m{g})}
\]
The next rule implements the idea we described in the text above.
All propositions are ephemeral, so we can implement ``\emph{a
node $y$ not yet in the tree}'' by checking whether it is
still in the graph, thereby consuming it.
\[
\infer[\m{move}]
  {\m{node}(x,\m{t}) \quad \m{edge}(x,y,\m{t}) \quad \m{node}(y,\m{t})}
  {\m{node}(x,\m{t}) \quad \m{edge}(x,y,\m{g}) \quad \m{node}(y,\m{g})}
\]
A proof using these two rules describes a particular
sequence of moves, taking edges from the graph and
adding them to the spanning tree.

In order to convince ourselves that this is correct, it is important
to understand the state invariants.  Initially, we have
\[
\begin{tabular}{ll}
$\m{init}$ \\
$\m{node}(x,\m{g})$ & for every node $x$ in $\m{g}$ \\
$\m{edge}(x,y,\m{g})$ & for every edge from $x$ to $y$ in $\m{g}$
\end{tabular}
\]
Rule $\m{move}$ does not apply, because we do not yet have a
node in $\m{t}$, so any inference must begin with rule $\m{start}$,
consuming $\m{init}$ and producing one node $x_0$ in $\m{t}$.
\[
\begin{tabular}{ll}
$\m{node}(x_0, \m{t})$ & for some node $x_0$ \\
$\m{node}(x,\m{g})$ & for every node $x\neq x_0$ in $\m{g}$ \\
$\m{edge}(x,y,\m{g})$ & for every edge from $x$ to $y$ in $\m{g}$
\end{tabular}
\]
Now rule $\m{start}$ can no longer be applied, and we apply $\m{move}$
as long as we can.  The rule preserves the invariant that each node
$x$ from the initial graph is either in $\m{t}$ ($\m{node}(x,\m{t})$)
or in $\m{g}$ ($\m{node}(x,\m{g})$).  It further preserves the
invariant that each edge in the original graph is either in $\m{t}$
($\m{edge}(x,y,\m{t})$) or still in $\m{g}$ ($\m{edge}(x,y,\m{g})$).

If the algorithm stops and no nodes are left in $\m{g}$, we must have
moved all $n$ nodes originally in $\m{g}$.  One is moved in the
$\m{start}$ rule, and $n-1$ are moved in applications of the
$\m{move}$ rule.  In every application of the $\m{move}$ rule we also
move exactly one edge from $\m{g}$ to $\m{t}$, so $\m{t}$ now has $n$
nodes and $n-1$ edges.  Further, it is connected since anytime we move
an edge it connects to something already in the partial spanning tree.
A connected graph with $n$ nodes and $n-1$ edges must be a tree, and
it spans $\m{g}$ because it has all the nodes of $\m{g}$.

If the algorithm stops and there are some nodes left in $\m{g}$, then
the original graph must have been disconnected.  Assume that $\m{g}$
is connected, $y$ is left in $\m{g}$, and we started with $x_0$ in the
first step.  Because $\m{g}$ is connected, there must be a path from
$x_0$ to $y$.  We prove that this is impossible by induction on the
structure of this path.  The last edge connects some node $y'$ to $y$.
If $y'$ is in the tree, then the rule $\m{move}$ would apply, but we
stipulated that the algorithm only stops if $\m{move}$ does not apply.
If $y'$ is in the graph but not in the tree, then we apply the
induction hypothesis to the subpath from $x_0$ to $y'$.

\section{Example: Counting in Binary}
\label{sec:counting}

In this section we see how to encode binary counting via ordered
inference as in the Lambek calculus.  We represent a binary number
$1011_2$ (which is eleven) by the following ordered propositions:
\[
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}
\]
where $\m{b1}$ represents bit 1, $\m{b0}$ represents the bit 0, and
$\m{eps}$ represents the empty string, thereby marking the end of the
binary string.  We think of increment as another proposition we
add at the \emph{right end} of the string.  For example, if we want
to increment the number above twice, we would write
\[
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}\; \m{inc}\; \m{inc}
\]
If we define the right inference rules we would like to infer
\[\begin{array}{l}
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}\; \m{inc}\; \m{inc} \\
\vdots \\
\m{eps} \; \m{b1}\; \m{b1}\; \m{b0}\; \m{b1}
\end{array}\]
Before you turn the page you might consider if you can define
ordered inference rules to define the increment operation.

\clearpage
We need the following three rules:
\[\begin{array}{c}
\infer[\m{inc0}]
 {\m{b1}}
 {\m{b0}\; \m{inc}}
\hspace{2em}
\infer[\m{inc1}]
 {\m{inc}\; \m{b0}}
 {\m{b1}\; \m{inc}}
\hspace{2em}
\infer[\m{inceps}]
 {\m{eps}\; \m{b1}}
 {\m{eps}\; \m{inc}}
\end{array}\]
The $\m{inc1}$ rule implements the carry bit by incrementing the
remainder of the bit string, while $\m{inceps}$ deposits the carry as
the highest bit in case we have reached the end of the bit string.

These rules encode some parallelism.  For example, after
a single step of inference we have
\[\begin{array}{l}
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}\; \m{inc}\; \m{inc} \\
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{inc} \; \m{b0}\; \m{inc} \\
\vdots \\
\m{eps} \; \m{b1}\; \m{b1}\; \m{b0}\; \m{b1}
\end{array}\]
Here we only show the state after inference and not the rule used
(which is $\m{inc1}$) for the sake of conciseness.  In the second
line, we can apply $\m{inc1}$ or $\m{inc0}$ or (because they are
independent) both of them simultaneously, which gives us
\[\begin{array}{l}
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}\; \m{inc}\; \m{inc} \\
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{inc} \; \m{b0}\; \m{inc} \\
\m{eps} \; \m{b1}\; \m{b0}\; \m{inc}\; \m{b0} \; \m{b1} \\
\vdots \\
\m{eps} \; \m{b1}\; \m{b1}\; \m{b0}\; \m{b1}
\end{array}\]
Now we can obtain the desired conclusion with one more step of
inference.
\[\begin{array}{l}
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}\; \m{inc}\; \m{inc} \\
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{inc} \; \m{b0}\; \m{inc} \\
\m{eps} \; \m{b1}\; \m{b0}\; \m{inc}\; \m{b0} \; \m{b1} \\
\m{eps} \; \m{b1}\; \m{b1}\; \m{b0}\; \m{b1}
\end{array}\]


\section{Ordered Hypothetical Judgments}

The notion of grammatical inference represents parsing as
the process of constructing a proof.  For example, if
we have a phrase (= sequence of words) $w_1 \ldots w_n$
we find their syntactical types $x_1 \ldots x_n$ (guessing
if necessary if they are ambiguous) and then set up the
problem
\[
\deduce[\vdots]
  {\mbox{?} : s}
  {(w_1 : x_1) \cdots (w_n : x_n)}
\]
where ``$\mbox{?}$'' will represent the parse tree as a properly
parenthesized expression (assuming, of course, we can find a proof).

So far, we can only represent the inference itself, but not the
\emph{goal} of parsing a whole sentence.  In order to express that we
introduce \emph{hypothetical judgments} as a new primitive concept.
The situation above is represented as
\[ (w_1 : x_1) \cdots (w_n : x_n) \vdash \mbox{?} : s \]
or, more generally, as
\[ (p_1 : x_1) \cdots (p_n : x_n) \vdash r : z \]
The turnstile symbol ``$\vdash$'' here separates the \emph{succedent}
$r:z$ from the \emph{antecedents} $p_i : x_i$.  We sometimes call the
left-hand side the \emph{context} or the \emph{hypotheses} and the
right-hand side the \emph{conclusion}.  Calling the succedent a
conclusion is accurate in the sense that it is the conclusion of a
hypothetical deduction, but it can also be confusing since we also
used ``conclusions'' to describe what is below the line in a rule of
inference.  We hope it will always be clear from the situation which
of these we mean.

Since we are studying ordered inference right now, the antecedents
that form the context are intrinsically ordered.  When we want to
refer to a sequence of such antecedents we write $\Omega$ where
``Omega'' is intended to suggest ``Order''.  When we capture other
forms of inference like linear inference we will revisit this
assumption.

\section{Inference with Sequents: Looking Left}

Now that we have identified hypothetical judgments, written as
sequents $\Omega \vdash r : z$, we should examine what this means
for our logical rules of inference.  Fortunately, we have had
only two connectives, \emph{over} and \emph{under}, first shown
here without the proof terms (that is, without the parse trees):
\[
\infer[\m{over}]
 {x}
 {x \lover y & y}
\hspace{3em}
\infer[\m{under}]
 {x}
 {y & y \lunder x}
\]
Now that the propositions we know appear as antecedents, the
direction of the rules appears to be reversed when considered
on sequents.
\[
\infer[{\lover}L^*]
 {\Omega_L \; (x\lover y)\; y \; \Omega_R \vdash z}
 {\Omega_L \; x \; \Omega_R \vdash z}
\hspace{3em}
\infer[{\lunder}L^*]
 {\Omega_L \; y \; (y\lunder x) \; \Omega_R \vdash z}
 {\Omega_L \; x \; \Omega_R \vdash z}
\]
We have written $\Omega_L$ and $\Omega_R$ to indicate the rest of the
context, which remains unaffected by the inference.  These rules
operate on the left of the turnstile, that is, on antecedents, and we
have therefore labeled them ${\lover}L^*$ and ${\lunder}L^*$,
pronounced \textit{over left} and \textit{under left}.  While helpful
for today's lecture, we will have to revise these rules at the
beginning of the next lecture, so we have marked them with an asterisk
to remind us that they are only preliminary.

Redecorating the rules with proof terms (that is, parse trees
in grammatical inference):
\[
\infer[\m{over}]
 {(p\, q) : x}
 {p : x \lover y & q : y}
\hspace{3em}
\infer[\m{under}]
 {(q\, p) : x}
 {q : y & p : y \lunder x}
\]
Now that the propositions we know appear as antecedents, the
direction of the rules appears to be reversed when considered
on sequents.
\[
\infer[{\lover}L^*]
 {\Omega_L \; (p:x\lover y)\; (q:y) \; \Omega_R \vdash r : z}
 {\Omega_L \; ((p\, q):x) \; \Omega_R \vdash r : z}
\hspace{2em}
\infer[{\lunder}L^*]
 {\Omega_L \; (q:y)\; (p:y\lunder x) \; \Omega_R \vdash r : z}
 {\Omega_L \; ((q\, p):x) \; \Omega_R \vdash r : z}
\]

Our inferences, now taking place on the antecedent, take
us upward in the tree, so when we have a situation such
as
\[
\deduce[\vdots]
  {p : s}
  {(w_1 : x_1) \cdots (w_n : x_n)}
\]
where we \emph{have} deduced $p:s$, we are now in the situation
\[
\deduce[\vdots]
  {(w_1 : x_1) \cdots (w_n : x_n) \vdash \mbox{?} : s}
  {p : s \vdash \mbox{?} : s}
\]
This means we need one more rule to complete the proof and
signal the success of a hypothetical proof.  Both forms with
and without the proof terms should be self-explanatory.
We use $\m{id}$ (for \emph{identity}) to label this inference.
\[
\infer[\m{id}_x]
 {x \vdash x}
 {\mathstrut}
\hspace{3em}
\infer[\m{id}_x]
 {p : x \vdash p : x}
 {\mathstrut}
\]
Because we wanted to represent the goal of parsing a sequence of words
as a complete sentence, no additional antecedents besides $x$ are
permitted in this rule.  Otherwise, a phrase such as \textit{Bob likes
  Alice likes} could be incorrectly seen to parse as the sentence
\textit{((Bob likes) Alice)} ignoring the second \textit{likes}.

\section{Inference with Sequents: Looking Right}

We already noted in
\href{http://www.cs.cmu.edu/~fp/courses/15816-f16/lectures/01-inference.pdf}{Lecture
  1} that $x \lunder (y \lover z)$ should be somehow equivalent to
$(x \lunder y) \lover z$ since both yield a $y$ when given an $x$ to
the left and $z$ to the right.  Setting this equivalence up as two
hypothetical judgments
\[
x \lunder (y \lover z) \vdash (x \lunder y) \lover z
\]
and
\[ 
(x \lunder y) \lover z \vdash x \lunder (y \lover z)
\]
that we are trying to prove however fails.  No inference is possible.
We are lacking the ability to express when we can deduce a
\emph{succedent} with a logical connective.  Lambek states
that we should be able to deduce
\[\begin{array}{ccc}
\infer[]
 {x \lover y\mathstrut}
 {z\mathstrut}
& \qquad \mbox{if} \qquad &
\infer[]
 {x\mathstrut}
 {z & y\mathstrut}
\end{array}\]
So $x \lover y$ should follow from $z$ if we get $x$ if we
put $y$ to the right of $z$.  With pure inference, as practiced
in the last lecture, we had no way to turn this ``\textit{if}''
into a form of inference rule.  However, armed with hypothetical
judgments it is not difficult to express precisely this:
\[
\infer[]
  {z \vdash x \lover y}
  {z\; y \vdash x}
\]
Instead of a single proposition $z$ we allow a context, so
we write this
\[
\infer[{\lover}R]
  {\Omega \vdash x \lover y}
  {\Omega\; y \vdash x}
\]
This is an example of a \emph{right rule}, because it analyzes the
structure of a proposition in the succedent and we pronounce it as
\textit{over right}.  The ${\lunder}R$ (\textit{under right}) rule
can be derived analogously.
\[
\infer[{\lunder}R]
  {\Omega \vdash y \lunder x}
  {y\; \Omega \vdash x}
\]
In the next lecture we will look at the question of how we know that
these rules are correct.  For example, we might have accidentally
swapped these two rules, in which case our logic would somehow
be flawed.  And, in fact, our rules are already flawed but we do
not have the tools yet to see this.

Let's come back to the motivating example and try to construct a
proof of
\[
x \lunder (y \lover z) \vdash (x \lunder y) \lover z
\]
Remember, all the rules work bottom-up, either on some antecedent (a
left rule) or on the succedent (a right rule).  No left rule applies
here (there is no $x$ to the left of $x \lunder (\ldots)$) but
fortunately the ${\lover}R$ rule does.
\[
\infer[{\lover}R]
 {x \lunder (y \lover z) \vdash (x \lunder y) \lover z}
 {x \lunder (y \lover z)\quad z \vdash x \lunder y}
\]
Again, no left rule applies (the parentheses are in the wrong place)
but a right rule does.
\[
\infer[{\lover}R]
 {x \lunder (y \lover z) \vdash (x \lunder y) \lover z}
 {\infer[{\lunder}R]
   {x \lunder (y \lover z)\quad z \vdash x \lunder y}
   {x\quad x \lunder (y \lover z)\quad z \vdash x}}
\]
Finally, now a left rule applies.
\[
\infer[{\lover}R]
 {x \lunder (y \lover z) \vdash (x \lunder y) \lover z}
 {\infer[{\lunder}R]
   {x \lunder (y \lover z)\quad z \vdash x \lunder y}
   {\infer[{\lunder}L^*]
     {x\quad x \lunder (y \lover z)\quad z \vdash y}
     {y \lover z\quad z \vdash y}}}
\]
One more left rule, and then we can apply identity.
\[
\infer[{\lover}R]
 {x \lunder (y \lover z) \vdash (x \lunder y) \lover z}
 {\infer[{\lunder}R]
   {x \lunder (y \lover z)\quad z \vdash x \lunder y}
   {\infer[{\lunder}L^*]
     {x\quad x \lunder (y \lover z)\quad z \vdash y}
     {\infer[{\lover}L^*]
       {y \lover z\quad z \vdash y}
       {\infer[\m{id}_y]
         {y \vdash y}
         {\mathstrut}}}}}
\]
The proof in the other direction is similar and left as
Exercise~\ref{exc:over-under}.

We have left out the proof terms here, concentrating entirely on the
logical connectives. We will return to proof terms for ordered
hypothetical judgment in a future lecture and proceed to conjecture
some logical connectives and how to define them via their left and
right rules.

\section{Alternative Conjunction}

As already mentioned in the last lecture, some words have more than
one syntactic type.  For example, \textit{and} has type
$s \lunder s \lover s$ (omitting parentheses now since the two forms are
equivalent by the reasoning in the previous section) and also type
$n \lunder n^* \lover n$, constructing a plural noun from two singular
ones.  We can combine this into a single type $x \with y$, pronounced
\textit{$x$ with $y$}:
\[
\mi{and} : (s \lunder s \lover s) \with (n \lunder n^* \lover n)
\]
Then, in a deduction, we are confronted with a choice between the two
for every occurrence of \textit{and}.  For example, in typing
\textit{Alice and Bob work and Eve likes Alice}, we choose
$n \lunder n^* \lover n$ for the first occurrence of \textit{and}, and
$s \lunder s \lover s$ for the second.

Lambek did not explicitly define this connective, but it would
be defined by the rules
\[
\infer[\m{with}_1]
  {x}{x \with y}
\hspace{3em}
\infer[\m{with}_2]
  {y}{x \with y}
\]
In the proof term we might write $.1$ for the first meaning
and $.2$ for the second meaning of the word.
\[
\infer[\m{with}_1]
  {p.1 : x}{p : x \with y}
\hspace{3em}
\infer[\m{with}_2]
  {p.2 : y}{p : x \with y}
\]
so that the parse tree for the sentence above might become
\[
 \mi{((Alice and.1 Bob) work) and.2 (Eve likes Alice)}
\]
where we have omitted parentheses that are redundant due
to the associativity of $\lunder {-} \lover$.

As before, these rules turn into left rules in the sequent
calculus, shown here only without the proof terms.
\[
\infer[{\with}L_1]
 {\Omega_L\; x \with y\; \Omega_R \vdash z}
 {\Omega_L\; x\; \Omega_R \vdash z}
\hspace{3em}
\infer[{\with}L_2]
 {\Omega_L\; x \with y\; \Omega_R \vdash z}
 {\Omega_L\; y\; \Omega_R \vdash z}
\]

To derive the right rule we must ask ourselves under which
circumstances we could use a proposition both as an $x$ and
as a $y$.  That's true, if we can show both.
\[
\infer[{\with}R]
 {\Omega \vdash x \with y}
 {\Omega \vdash x & \Omega \vdash y}
\]

\section{Concatenation}

In a sequent, there are multiple antecedents (in order!) but
only one succedent. So how could we encode the goal we had
in the binary counting example:
\[\begin{array}{l}
\m{eps} \; \m{b1}\; \m{b0}\; \m{b1}\; \m{b1}\; \m{inc}\; \m{inc} \\
\vdots \\
\m{eps} \; \m{b1}\; \m{b1}\; \m{b0}\; \m{b1}
\end{array}\]
Clearly, this is a hypothetical judgment but the succedent is not
a single proposition.  In order to define over and under, it is
important to maintain a single succedent, so we need to define
a new connective that expresses adjacency as a new proposition.
We write $x \fuse y$ (read: \emph{$x$ fuse $y$}).  In the Lambek
calculus, we would simply write
\[
\infer[\m{fuse}]
  {x \; y}
  {x \fuse y}
\]
As a left rule, this is simply turned upside down and becomes
\[
\infer[{\fuse}L]
  {\Omega_L \; x \fuse y\; \Omega_R \vdash z}
  {\Omega_L \; x \; y \; \Omega_R \vdash z}
\]
As a right rule for $x \fuse y$, we have to divide the context into two
segments, one proving $x$ and the other proving $y$.
\[
\infer[{\fuse}R]
  {\Omega_1 \; \Omega_2 \vdash x \fuse y}
  {\Omega_1 \vdash x & \Omega_2 \vdash y}
\]
Note that there is some nondeterminism in this rule if we
decide to use it to prove a sequent, because we have to decide
\emph{where} to split the context $\Omega = (\Omega_1\; \Omega_2)$.
For a context with $n$ propositions there are $n+1$ possibilities.
For example, if we want to express that a phrase represented
by $\Omega$ is parsed into \emph{two sentences} we can prove
the hypothetical judgment
\[ \Omega \vdash s \fuse s \]
We can then prove
\[\begin{array}{cccccc}
\mi{Alice} & \mi{works} & \mi{Bob} & \mi{sleeps} & & \mbox{?} \\
: & : & : & : & & : \\
n & n \lunder s & n & n \lunder s & \vdash & s \fuse s
\end{array}\]
but we have to split the phrase exactly between \textit{works} and
\textit{Bob} so that both premises can be proved.  Assuming a notation
of $p \cdot q : x \fuse y$ if $p:x$ and $q:y$, the proof term for
$s \fuse s$ in this example would be
$(\mi{Alice}\; \mi{works}) \cdot (\mi{Bob}\; \mi{sleeps})$.

\section{Emptiness}

In this section we consider $\one$, the unit of concatenation, which
corresponds to the empty context.  The left and right rules are
nullary versions of the binary concatenation.  In particular, there
must be no antecedents in the right rule for $\one$.
\[
\infer[{\one}R]
 {\vdash \one}
 {\mathstrut}
\hspace{3em}
\infer[{\one}L]
 {\Omega_L \; \one \; \Omega_R \vdash z}
 {\Omega_L\; \Omega_R \vdash z}
\]

\section{An Unexpected Incompleteness}

In functional programming there is a pattern called \emph{currying}
which says that instead of a function of type
$(\tau \times \sigma) \rightarrow \rho$, passing a pair with values of
type $\tau$ and $\sigma$, we can pass the arguments sequentially as
indicated by the type $\tau \rightarrow (\sigma \rightarrow \rho)$.
Logically, this is manifested by the isomorphism between these two
types when considered as propositions, where $\times$ is conjunction
and $\rightarrow$ is implication.

Is there something similar in ordered logic?  First we note that
\emph{over} and \emph{under} are a form of implication, distinguished
merely by whether they expect their argument on the left or on the
right.  Concatenation is a form of conjunction since it puts together
two proofs.  Let's consider $(x \fuse y) \lunder z$.  This expects $x$
followed by $y$ to its left and concludes $z$.  Similarly,
$y \lunder (x \lunder z)$ expects a $y$ to its left and then an $x$
next to that which, if you had concatenated them together, would be
$x \fuse y$.  So these two seem like they should be intuitively
equivalent.  Let's try to use the tools of logic to prove that.

First, $y \lunder (x \lunder z) \vdash (x \fuse y) \lunder z$.
We show here the completed proof, but you should view it step
by step \emph{going upward} from the conclusion.
\[
\infer[{\lunder}R]
 {y \lunder (x \lunder z) \vdash (x \fuse y) \lunder z}
 {\infer[{\fuse}L]
   {x \fuse y \quad y \lunder (x \lunder z) \vdash z}
   {\infer[{\lunder}L^*]
     {x \quad y \quad y \lunder (x \lunder z) \vdash z}
     {\infer[{\lunder}L^*]
       {x \quad x \lunder z \vdash z}
       {\infer[\m{id}_z]
         {z \vdash z}
         {\mathstrut}}}}}
\]
Now for the other direction.  Unfortunately, this does not go as well.
\[
\infer[{\lunder}R]
 {(x \fuse y) \lunder z \vdash y \lunder (x \lunder z)}
 {\infer[{\lunder}R]
   {y \quad (x \fuse y) \lunder z \vdash x \lunder z}
   {\deduce
     {x \quad y \quad (x \fuse y) \lunder z \vdash z}
     {\mbox{?}}}}
\]
The sequent at the top should be intuitively provable, since we should
be able to combine $x$ and $y$ to $x \fuse y$ and then use the
${\lunder}L^*$ rule, but there is no such rule.  All rules in the
sequent calculus so far \emph{decompose} connectives in the antecedent
(left rules) or succedent (right rules), but here we would like to
\emph{construct} a proof of a compound proposition. We could add
an ad hoc rule to handle this situation, but how do we know that the
resulting system does not have other unexpected sources of incompleteness?

In the next lecture we will first fix this problem and then
systematically study how to ensure that our inference rules do not
exhibit similar problems.

\clearpage
\phantomsection
\addcontentsline{toc}{section}{Exercises}
\section*{Exercises}

\begin{exercise}\rm
  Consider variations of the representation and rules in the spanning
  tree example from \autoref{sec:spanning-trees}.  Consider all four
  possibilities of nodes and edges in $\m{g}$ and $\m{t}$ being
  ephemeral or persistent.  In each case show the form of the three
  rules in question: $\m{sym}$ (possibly with two variants),
  $\m{start}$, and $\m{move}$, indicate if the modification would be
  correct, and spell out how to check if a proper spanning tree has
  been built in the final state.
\end{exercise}

\begin{exercise}\rm
  Consider the encoding of binary numbers in ordered logic as in
  \autoref{sec:counting}.  Assume a new proposition $\m{par}$ for
  \emph{parity} and write rules so that the binary representation of a
  number followed by $\m{par}$ computes $\m{eps}\; \m{b0}$ or
  $\m{eps}\; \m{b1}$ if we have an even or odd number of ones,
  respectively.
\end{exercise}

\begin{exercise}\rm
  Represent the computation of a Turing machine using ordered
  inference as in Section~\ref{sec:counting}.  You will need to decide
  on a finite, but potentially unbounded representation of the tape,
  the finite number of states, and the state transitions, such that
  each step of the Turing machine corresponds to one or more steps of
  ordered inference. Make sure to describe all parts of the encoding
  carefully.
\end{exercise}

\begin{exercise}\rm
  Represent instances of the Post correspondence problem in ordered logic
  so that ordered inference as in Section~\ref{sec:counting} from an
  initial state proves a distinguished proposition $\m{s}$ (for
  success) if and only if the problem has a solution.  One should be
  able to extract the actual solution from the proof.  Make sure to
  describe all parts of the encoding carefully.
\end{exercise}

\begin{exercise}\rm
  \label{exc:over-under}
  Prove $(x \lunder y) \lover z \vdash x \lunder (y \lover z)$
\end{exercise}

\begin{exercise}
  \label{exc:interaction}
  Find equivalences along the lines of associativity, currying, or
  distributivity laws and prove both directions.  Note (but do not
  prove) where they don't seem to exist if we restrict ourselves to
  the over, under, fuse, and with connectives.
  \begin{enumerate}
  \item $x \lover (y \fuse z)$
  \item $(x \fuse y) \lover z$
  \item $(x \with y) \lover z$
  \item $x \lover (y \with z)$
  \end{enumerate}
\end{exercise}

\clearpage
\phantomsection
\addcontentsline{toc}{section}{References}
\bibliographystyle{alpha}
\bibliography{fp,lfs}

% \cleardoublepage
\end{document}
