Received: from illc-sun.philo.uva.nl by moose.cs.indiana.edu
(8.7.1/IUCS.1.39) id NAA20886; Sun, 31 Dec 1995 13:36:49 -0500 (EST)
Received: by illc-sun.philo.uva.nl
id AA19376; Sun, 31 Dec 1995 19:36:44 +0100
Message-Id: <199512311836.AA19376@illc-sun.philo.uva.nl>
Date: Sun, 31 Dec 1995 19:36:44 +0100
From: dib@illc.uva.nl (David Beaver)
X-Organisation: Department of Philosophy
University of Amsterdam
Nieuwe Doelenstraat 15
NL-1012 CP Amsterdam
The Netherlands
X-Phone: +31 20 525 4500
X-Telex:
X-Fax: +31 20 525 4503
To: ITALLC96@cs.indiana.edu, dib@illc.uva.nl
Subject: Abstract for ITALLC96
Status: RO
Dear ITALLC96,
there follows an abstract in LaTeX. It should print out on
9 pages of US standard letter paper. I have typeset it successfully on
Textures for the Mac, and on the standard Unix release version
2.09 of LaTeX. (I did not try on LaTeX 3.0.) If there is any
problem with typesetting or printing, I will be happy
to help, or to send hardcopy.
David Beaver
dib@illc.uva.nl
8<---------------------------------------------------
\documentstyle[fleqn,leqno]{article}
\newcommand{\Mark}[1]{{\em #1}}
\newcommand{\abbreviate}[2]{\newcommand{#1}{\mbox{#2}}}
% Boxed Cite:
\newcommand{\bcite}[1]{\makebox[1 cm]{\cite{#1}}}
% Specified width Boxed Cite
\newcommand{\sbcite}[2]{\makebox[#2 cm]{\cite{#1}}}
\newenvironment{nb}[1] {\begin{quotation}{\em #1}:\begin{footnotesize}}
{\end{footnotesize}\end{quotation}}
\newcommand{\test} {\partial}
\newcommand{\Test} {\partial}
\newcommand{\TestHeim} {\cal H}
\newcommand{\6}{\mbox{$[\hspace*{-.6mm}[$}} % Semantic bracket
\newcommand{\9}{\mbox{$]\hspace*{-.6mm}]$}}
\newcommand {\sem}[2] {\6 #1 \9_\iss{#2}}
\newcommand {\Int}[2] {\| #1 \|_\iss{#2}}
\newcommand {\tup}[1]{\langle #1 \rangle}
\newcommand {\where}{\mid\:}
\newcommand {\is} [1] {{\mbox{\protect\scriptsize \it #1}}}
\newcommand {\iss} [1] {{\mbox{\protect\tiny \rm #1}}}
\newcommand {\eq}{\! \equn \!}
\newcommand {\equn}{\doteq}
\newcommand {\badeg}{\sqz{?}}
\newcommand {\EqDef}{\equiv_{\mbox{def}}}
\newcommand{\GAP}[1]{\mbox{\hspace{3 mm} #1}}
%\newcommand {\ind}{\hspace*{0.7cm}}
\newcommand {\arr}{\nolinebreak $\Rightarrow$ \nolinebreak }
\newcommand {\ty}[1] {\!:\!#1\:}
\newcommand {\spac} {\vspace{1 in}}
\newcommand {\halfspac} {\vspace{0.5 in}}
\newcommand {\ifof}{\mbox{\it\ iff\ }}
\newcommand {\when}{\mbox{\it\ if\ }}
\newcommand {\Else}{\mbox{\it\ else\ }}
\newcommand {\ifdef}{\mbox{\it\ if defined}}
\newcommand {\elseun}{\mbox{\it\ else undefined}}
\newcommand {\sss}[1] {\sem{#1}{3}^{w}}
\newcommand {\AND}{\mbox{\it\ and\ }}
\newcommand {\OR}{\mbox{\it\ or\ }}
\newcommand {\uentails}{\models_{u}}
\newcommand {\tentails}{\models_{3}}
\newcommand {\ABu}[1]{\mbox{\sc #1$^{u}$}}
\newcommand {\WET}[1]{\mbox{\it
#1}_{\tup{\omega,\tup{\epsilon,\tau}}}}
\newcommand {\ETT}[1]{\mbox{\it #1}_{\tup{\varepsilon,\tau}}}
\newcommand {\ABb}[1]{\mbox{\sc #1$^{b}$}}
\newcommand {\ABa}[1]{\mbox{\sc #1$^{a}$}}
\newcommand {\Name}[1]{\mbox{\sc #1}}
\newcommand {\Entails}{\models}
\newcommand {\Centails}{\Entails_{\sigma}}
\newcommand {\Aentails}{\Entails_{\mbox{\scriptsize able}}}
\newcommand {\Union}{\cup}
\newcommand {\Intersection}{\cap}
\newcommand {\Minus}{\backslash}
\newcommand {\Subset}{\sqsubseteq}
\newcommand {\Compose}{\circ}
\newcommand {\KplMinus}{\wr}
\newcommand {\KplEqual}{\cong}
\newcommand {\Trans} [1] {#1^{\star}}
\newcommand {\might} {\mbox{might}\:}
\newcommand {\Map}{\leadsto}
\newcommand {\Strengthens}{\,\mbox{\it strengthens}\,}
\newcommand {\Close}[1]{\downarrow#1}
\newcommand {\Satisfies}{\,\mbox{\it satisfies}\,}
\newcommand {\Consistentwith}{\,\mbox{\it consistent-with}\,}
\newcommand {\Consistent}{\,\mbox{\it consistent}\,}
\newcommand {\Presupposition}{\,\mbox{\it presupposition}\,}
\newcommand {\Presupposes}{\,\mbox{\it presupposes}\,}
\newcommand {\Admits}{\,\mbox{\it admits}\,}
\newcommand {\atomic}{{\cal P}}
\newcommand {\worlds}{{\cal W}}
\newcommand {\Goesto}{\mbox{$\Rightarrow$}}
\newcommand {\Sep}{\mbox{$\mid$}}
\newcommand{\IB}[1]{\mbox{\it #1}}
\newcommand{\Stop}{\mbox{$\circ$}}
% UL/PUL/KPL Language:
\newcommand {\DMPLPoss}{\mbox{\sc might$_{\it dmpl}$}}
\newcommand {\Poss}{\mbox{\sc might}}
\newcommand {\Necc}{\mbox{\sc must}}
\newcommand {\And}{\,\mbox{\sc and}\,}
\newcommand {\Implies}{\,\mbox{\sc implies}\,}
\newcommand {\Or}{\,\mbox{\sc or}\,}
\newcommand {\Not}{\:\mbox{\sc not}\,}
%\newcommand {\Poss}{\mbox{\it might}}
%\newcommand {\Necc}{\Box}
%\newcommand {\And}{\,\mbox{\it and}\,}
%\newcommand {\Implies}{\,\mbox{\it implies}\,}
%\newcommand {\Or}{\,\mbox{\it or}\,}
%\newcommand {\Not}{\:\mbox{\it not}\,}
%\newcommand {\And}{\,\&\,}
%\newcommand {\Or}{\vee}
%\newcommand {\Not}{\hbox{$\sim$}}
\newcommand {\Sand}{\,\&\,}
\newcommand {\ExistS}{{\cal E}_\iss{S}}
\newcommand {\Forall}{{\cal A}_\iss{S}}
\newcommand {\ExistP}{{\cal E}_\iss{P}}
\newcommand {\ResolveS}{{\cal R}_\iss{S}}
\newcommand {\ResolveP}{{\cal R}_\iss{P}}
\newcommand {\Def}[2] {{\it def}_{#1}.#2}
\newcommand {\True}{\mbox{\em true}}
% Type theory metalanguage:
\newcommand {\Niet}{\neg}
\newcommand {\En}{\wedge}
\newcommand {\Of}{\vee}
\newcommand {\Implic}{\rightarrow}
\newcommand {\Waar}{\top}
\newcommand {\Desda}{\leftrightarrow}
\newcommand {\Onwaar}{\bot}
\newcommand{\Natural}
{\mbox{\rm I}\hspace{-1.9 pt}\mbox{\rm N}}
\newcommand{\Integer}{{\cal I}}
%{\mbox{\rm I}\hspace{-1.9 pt}\mbox{\rm I}}
\newenvironment{defn}[1]{\begin{defin}[#1]\samepage}{\end{defin}}
\newtheorem{defin}{Definition}
\newenvironment{deriv}[1]{\begin{derivation}[#1]\samepage}{\end{derivation}}
\newtheorem{derivation}{Derivation}
\newtheorem{alternative}{Alternative Definition}
\newtheorem{postulate}{Meaning Postulate}
\newtheorem{convention}{Notation Convention}
\newtheorem{vaguery}[defin]{Vaguery}
\newtheorem{fact}{Fact}
\newtheorem{lemma}{Lemma}
\newtheorem{proof}{Proof}
\newtheorem{prop}{Proposition}
\newtheorem{dummy}{Dummy Definition}
\renewcommand{\thepostulate}{\mbox{\rm MP\arabic{postulate}}}
\renewcommand{\thedefin}{\mbox{\rm D\arabic{defin}}}
\renewcommand{\thefact}{\mbox{\rm F\arabic{fact}}}
\renewcommand{\thelemma}{\mbox{\rm L\arabic{lemma}}}
% Two-column derivation table: left column flush, columns separated by a
% \leadsto arrow, right column a 10 cm paragraph box.
% Fix: the end code read \end{tablular} (typo), which would fail to compile.
\newenvironment{deritable}{\begin{tabular}{@{}l@{$\leadsto$}p{10 cm}}}
{\end{tabular}}
\abbreviate{\Ty} {\mbox{\rm Ty$_{3}$}}
\abbreviate{\PE} {\it PE}
%\abbreviate{\Dom} {\it dom}
\abbreviate{\LEC}{${\cal L}_{\it EC}$}
\abbreviate{\VAR}{\sc dm}
\abbreviate{\QUANT}{\sc quant}
\abbreviate{\DET}{\sc det}
\abbreviate{\PRED}{$\cal{P}$}
\abbreviate{\SOME}{\sc some}
\abbreviate{\MOST}{\sc most}
\abbreviate{\FEW}{\sc few}
\abbreviate{\NO}{\sc no}
\abbreviate{\EVERY}{\sc every}
\abbreviate{\THE}{\sc the}
\abbreviate{\EXONE}{\sc exactly-one}
\abbreviate{\FORM}{\sc form}
\abbreviate{\SHE}{\sc she}
\abbreviate{\HE}{\sc he}
\abbreviate{\THEY}{\sc they}
\abbreviate{\IT}{\sc it}
\abbreviate{\Lable}{$\cal{L}_{\rm ABLE}$}
\abbreviate{\That}{\it that}
% Special abbreviations for ABLE semantics
\abbreviate{\Unfold}{\it unfold}
\abbreviate{\Dist}{\it distribute}
\newcommand {\Encapsulate}{\nabla}
\abbreviate{\Singleton}{\it singleton}
\newcommand {\dmtype}{d}
\newcommand {\Neg}{\mbox{\it neg}}
\newcommand {\Is}{\,\mbox{\sc is}\,}
\abbreviate {\DEF}{\it defined}
\abbreviate {\DOM}{\it domain}
\abbreviate {\TDOM}{\it t-domain}
\abbreviate {\PDOM}{\it p-domain}
\abbreviate {\ENTAILS}{\it entails}
\abbreviate {\Sensible}{\it sensible}
%\newcommand {\Absurd}{\underline{\bot}}
\newcommand {\Absurd}{\fbox{\mbox{\rm :\hspace{.5 mm}--(}}}
%\newcommand {\Bliss}{\overline{\top}}
\newcommand {\Bliss}{\fbox{\mbox{\rm :\hspace{.5 mm}--)}}}
\newcommand {\Empty} {\odot}
\abbreviate {\Worldset}{\it $\omega$-set}
\newcommand {\Extends}{\succeq}
\newcommand {\Evokes}{\,\mbox{\it evokes}\,}
\abbreviate {\Qset} {\it q-set}
%\abbreviate {\Rset} {\it r-set}
%\abbreviate {\Sset} {\it s-set}
\abbreviate {\AddInf} {+}
\abbreviate {\AddAss} {\it add}
\newcommand {\BS}{\backslash}
\newcommand {\Merge}{\sqcup}
\newcommand {\QQ}{\mbox{$\cal Q$}}
\newcommand {\PP}{\mbox{$\cal P$}}
\abbreviate {\Sprop}{\it s-prop}
\abbreviate {\Dprop}{\it d-prop}
% Predicates in fragment
\abbreviate {\OWNS}{\bf owns}
\abbreviate {\WOMAN}{\bf woman}
\abbreviate {\WALKS}{\bf walks}
\abbreviate {\NAMEDJOHN}{\bf named\_john}
\abbreviate {\APPLY}{\it pred}
\newcommand {\WAS}{\unrhd}
\abbreviate {\NEXT}{\it next}
\abbreviate{\NQ}{\it nq}
\newcommand{\EQV}{\equiv}
\abbreviate{\sco}{\it sco}
\abbreviate{\res}{\it res}
\abbreviate{\ini}{\it init}
%\abbreviate{\outseq}{$$}
%\abbreviate{\iniseq}{$f$}
\abbreviate{\scoset}{$G_\iss{sco}$}
\abbreviate{\resset}{$G_\iss{res}$}
\abbreviate{\resx}{$X_\iss{res}$}
\abbreviate{\scox}{$X_\iss{sco}$}
% Silly abbreviations
\abbreviate{\BC}{\mbox{\it bc}}
\abbreviate{\CC}{\mbox{\it cc}}
\abbreviate{\DC}{\mbox{\it dc}}
\abbreviate{\BA}{\mbox{\it ba}}
\abbreviate{\CA}{\mbox{\it ca}}
\abbreviate{\DA}{\mbox{\it da}}
\abbreviate{\BIH}{\mbox{\it bih}}
\abbreviate{\RBIH}{\mbox{\it cb\_a\_bih}}
\abbreviate{\AWFB}{\mbox{\it awfb}}
\abbreviate{\RBIA}{\mbox{\it cb\_a\_ba}}
\title{A Plea for Common Sense:\\ {\large {\it Extended Abstract for
ITALLC'96\\(Categorisation: Formal theories of pragmatics and discourse)
}}}
\author{David Beaver\\
ILLC, University of Amsterdam}
\date{}
\textwidth 5.50in % voor SALT/Proc
\marginparwidth 1.00in % voor SALT/Proc
\oddsidemargin .60in % voor SALT/Proc
\evensidemargin .60in % voor SALT/Proc
\topmargin -.50in % voor SALT/Proc
\textheight 9.00in % voor SALT/Proc
\begin{document}
\maketitle
\section{Introduction}
Life is short. There is not enough time to explain everything. As
speakers, or writers, we are forced to make assumptions. It is
common to be advised to fix in one's mind a picture of the audience,
that is, to make an advance decision as to what the audience can be
expected to know. Often, especially given limitations of time for
speaking or space for writing, one is forced to take much for
granted. As a result, cases of presupposition failure, the situation
occurring when the speaker or writer takes for granted something of
which the hearer or reader is not previously aware, are surely
common. Somehow, hearers and readers cope, and usually without
complaining.
The author, in most {\em genres}, assumes that the text will be read
linearly, and further assumes, optimistically, that readers will
gather information throughout the reading process. So what the author
has is not a fixed picture of the common ground with the intended
readership, but a rather rough cut and idealised movie of how this
common ground should develop. Each frame in the movie approximates
what is common between relevant aspects of (1) the author's beliefs at
time of writing, and (2) the readers' beliefs as they reach some point
in the text. At risk of straining the cinematic metaphor somewhat, it
could be said that the text itself is analogous to
a script, but with detailed screenplay and directorial instructions
omitted. Barring a major scientific breakthrough, the corresponding
film will never be put on general release, so precisely how the writer
intends the information state of idealised readers to evolve as they
read is never made public in all its technicolor glory.
In this extended abstract, I will describe in brief a model
of how readers' information states do evolve, as they construct their own movies
on
the basis of the script.
In directing their own films readers second-guess the intentions of
the original writer-director. But to understand how readers work out
the writer-director's intentions, it is firstly necessary to know more
of the craft of the writer-director. In the coming section I will
elaborate on how, working from an assumption as to initial conditions,
the author envisages the evolution of the common ground. It will be
helpful to adopt some of the formalism of recent dynamic semantics. I
will build particularly on ideas of Stalnaker \cite{sta}, Karttunen
\cite{kart:74} and Heim \cite{heim:83}, and use formal techniques
related to those discussed by Groenendijk and Stokhof, e.g.
\cite{gs:dpl}, and Veltman \cite{veltman:us}.
\section{The Writer's View of the Common Ground}
I will now present what will here be called Presupposition Logic, a
simple propositional system with a dynamic semantics and dynamic
notion of semantic entailment. Further discussion and motivation can be
found in \cite{beaver:kpl,beaver:thesis,beaver:salt,beaver:official}.
Presupposition Logic provides a model of how a speaker or author envisages
the common ground evolving. This evolution is iterative, since the
common ground at any instant provides the context in which a given
chunk of text is interpreted, and it is the effect of this
interpretation which determines what the common ground will be going
into interpretation of the next chunk. It is no longer controversial
to assert that the interpretation process relies on such iteration,
but there remains some question as to the coarse-grainedness of the
iteration. For instance in the work of Gazdar \cite{gazdar} (and also
related proposals such as Mercer's \cite{mercer}) it is whole
sentences which produce a change in the context of interpretation.
However in Karttunen-Heim style treatments of presupposition such as
that introduced in this paper, as well as in treatments of anaphora
due to Heim \cite{heim:thesis}, Kamp \cite{kamp:drt} and Groenendijk
and Stokhof \cite{gs:dpl}, a finer grained iteration is involved, with
sub-sentential constituents producing their own effects on local
contexts of interpretation.
We begin by assuming some set of atomic proposition symbols. A model is
a pair $\tup{W,I}$, where $W$\ is a set of worlds and $I$\ is an
interpretation function mapping each atomic proposition symbol to a
subset of $W$. The Context Change Potential (to borrow Heim's
terminology) of a formula $\phi$\ written $\sem{\phi}{}$\ is a set of
pairs of input and output contexts, where a context is the writer's
view of the common ground. Following Stalnaker, a context is thought
of as a set of possible worlds, the set containing all and only those
worlds compatible with the information supposed to be common. I
will write $\sigma\sem{\phi}{}\tau$\ to indicate that the pair
$\tup{\sigma,\tau}$\ is in the $\sem{\phi}{}$\ relation, i.e. an input
context $\sigma$\ updated with the information in $\phi$\ can produce
an output context $\tau$.
Defining contexts as subsets of $W$\ introduces a
natural lattice structure with union and intersection as meet and
join, and this lattice provides an intuitive
model-theoretic characterisation of the amount of information in a
given context. A minimal context (with respect to some model) may be
defined as the set of all worlds (in that model): this is
the state of blissful ignorance in which no information about the
world is available. Similarly a maximally informative
non-contradictory context would be a singleton set: the available
information rules out all except one world. However, it is possible to
add even more information to such a context, and in case this
information contradicts previous information we will arrive at a
context containing no worlds, a truly maximal but contradictory
context.
Definition \ref{defn:semul}, below, gives an update semantics for the
language of propositional logic. The first clause says that the result
of updating a context with an atomic proposition is an output
containing only those worlds in the input which are in the extension
of the proposition. A context can be updated with a conjunction of two
formulae (the second clause) just in case it can firstly be updated
with the left conjunct to produce an intermediary context ($\upsilon$\
in the definition), and this context can be updated with the
right-hand conjunct to produce the final output ($\tau$). The third
clause says that a context can be updated with the negation of a
formula just in case there is some state that can be obtained by
updating the context with the negated formula itself, in which case
the result of updating with the whole formula is the set of worlds in
the input which are not present in the update with the negated
formula. In other words, the effect of updating with the negation of a
formula is to remove all information compatible with the formula. The
final two clauses define implication and disjunction by (carefully selected)
standard equivalences.
\begin{defn}{Update Semantics for
Propositional Logic}\label{defn:semul}
For all models ${\cal M}\;=\;\tup{W,I}$ and information states $\sigma,\tau$,
the
relation $\sem{.}{}^{\cal M}$ (superscript omitted where
unambiguous) is given recursively by:
\begin{eqnarray}
%
\sigma\sem{p_\iss{atomic}}{}\tau & \ifof &
\tau= \{w\in\sigma \mid w\in I(p)\}\\
%
\sigma\sem{\phi \En \psi}{}\tau & \ifof &
\exists \upsilon \;\;
\sigma\sem{\phi}{}\upsilon\sem{\psi}{}\tau \\
%
\sigma\sem{\Niet \phi}{}\tau & \ifof &
\exists \upsilon \;\; \sigma\sem{\phi}{}\upsilon
\;\En\;
\tau = \sigma\Minus\upsilon\\
%
\sigma\sem{\phi \Implic \psi}{}\tau & \ifof &
\sigma\sem{\Niet(\phi\En (\Niet\psi))}{}\tau\\
%
\sigma\sem{\phi \Of \psi}{}\tau & \ifof &
\sigma\sem{\Niet(\Niet\phi \En \Niet\psi)}{}\tau
%
\end{eqnarray}
\end{defn}
A context $\sigma$\ satisfies a formula $\phi$\ (written
$\sigma\models\phi$) if updating adds
no new information, producing an output identical to the input.
One formula entails another ($\phi\models\psi$) if any update with
the first produces a context in which the second is satisfied:
\begin{defn}{Satisfaction and Dynamic
Entailment}\label{defn:sat+ent}
\begin{eqnarray*}
%
\sigma\models\phi & \ifof & \sigma\sem{\phi}{}\sigma\\
\phi\models\psi & \ifof &
\forall \sigma,\tau\;\; (\sigma\sem{\phi}{}\tau \Rightarrow
\tau\models\psi)
%
\end{eqnarray*}
\end{defn}
Over the standard propositional language this notion of entailment
is classical. However, we will now extend the language with a
presupposition operator $\partial$: the resulting logic will be
non-classical. For example, commuting conjunctions will no longer
uniformly preserve validity. The intuition behind the following
definition is that a formula $\partial\phi$\ (``the presupposition that
$\phi$'') places a constraint on the input context, only allowing
update to continue if the presupposed proposition is already
satisfied.
\begin{defn}{Presupposition Logic}\label{defn:presop}Presupposition
Logic is defined over the language of Propositional Logic with an extra unary
operator $\partial$. It has the dynamic notion of semantic entailment
above, and semantics consisting of the update semantics for atomic
propositions and standard connectives combined with the following
interpretation for $\partial$-formulae:
\begin{eqnarray*}
\sigma\sem{\partial\phi}{}\tau & \ifof & \sigma\models\phi
\end{eqnarray*}
\end{defn}
As an example the sentence ``Mary realises that John is sleepy'' might
be said to correspond to a formula in Presupposition Logic of the form
$\partial p\;\En\;q$, where $p$\ is the atomic proposition that John is
sleepy, and $q$\ is an atomic proposition that Mary has come to
believe that John is sleepy. In this paper I will not be concerned
with the details of how such logical forms may be derived from natural
language, or with the question of whether it is reasonable to use a
representation in which presuppositions are divided explicitly from
assertions using the $\partial$-operator. However, both these issues
are dealt with elsewhere: see
\cite{beaver:kmg,beaver:thesis,beaver:official} where two sorted
versions of classical type theory are used to provide a Presupposition
Logic style semantics for a fragment of natural language.
\newcommand{\Pre}{>\!\!>}
Suppose a formula contains a presuppositional sub-formula. What will
the presuppositions of the whole formula be? This is the
presupposition projection problem of Langendoen and Savin \cite{ls},
except applied to Presupposition Logic rather than natural language.
A formula $\partial\phi$, ``the presupposition that $\phi$'', defines an update
if and only if $\phi$\ is satisfied, so it is natural to say that in
general a formula presupposes all those formulae that must be
satisfied by the input context in order for there to be an update. We
say that a context $\sigma$\ admits a formula $\phi$\ (written
$\sigma\rhd\phi$) if and only if it is possible to update $\sigma$\
with $\phi$, this being a formalisation of Karttunen's notion of admittance in
\cite{kart:74}. In that case one formula $\phi$\ presupposes another
$\psi$ (written $\phi\Pre \psi$) just in case every context that admits
the first satisfies the second. Note that admittance provides a
counterpart to so-called {\em presupposition failure}, what happens
when updating cannot continue because presuppositions are not
satisfied.
\begin{defn}{Admits ($\rhd$) and Presupposes ($\Pre$)}\label{defn:admit}
\begin{eqnarray*}
\sigma\rhd\phi & \ifof & \exists\tau\;\sigma\sem{\phi}{}\tau \\
\phi \Pre \psi & \ifof & \forall \sigma\;\sigma\rhd\phi\Rightarrow
\sigma\models\psi
\end{eqnarray*}
\end{defn}
It is clear that the definitions for the semantics of the
$\partial$-operator and the meta-logical
$\Pre $\ relation are closely related. For example we have that for any
$\phi$, $\partial\phi \Pre \phi$. Indeed, $\Pre $\ could have been
equivalently defined in terms of $\partial$, defining
$\phi\Pre \psi\;\;\ifof\;\;$for some $\chi$, $\sem{\phi}{} =
\sem{\partial\psi\En\chi}{}$. We can now study projection
in Presupposition Logic. As detailed in Fact \ref{fact:kart}, the system behaves just
as
anyone familiar with Karttunen's 1974 system would expect. In
particular, note that a formula may fail to carry a presupposition of one of its
component subformulae, but instead carry a logically weaker conditionalised
variant.
\begin{fact}
\label{fact:kart}\ \\
If $\phi \Pre \psi$\ then:
\begin{eqnarray*}
\Niet\phi & \Pre & \psi\\
\phi \En \chi & \Pre & \psi\\
\phi \Implic \chi & \Pre & \psi\\
\phi\Of\chi & \Pre & \psi\\
\chi\En\phi & \Pre & \chi\Implic\psi\\
\chi\Implic\phi& \Pre & \chi\Implic\psi\\
\chi\Of\phi& \Pre & (\Niet\chi)\Implic\psi
\end{eqnarray*}
\end{fact}
\section{The Naive Reader}
Readers who updated their own information state according
to principles like those behind the semantics of Presupposition Logic would be
stymied whenever some information was presupposed which they did not have. In
this situation the reader would lack any further means of updating.
So Presupposition Logic, as it stands, does not provide a good model of
the evolution of the information state of a hearer or reader. But
suppose that you only had a Presupposition Logic-like semantics
to help you understand a text. How would you use that semantics to
glean information?
If an infinite number of monkeys with typewriters were given time,
some of them might produce this text. What if an infinite group of
monkeys schooled in the writer-director approach to writing described
above were given some rhetorical goal but no description of the
intended audience? They might choose the
initial common ground randomly, although after that each monkey's view
of the common ground at a particular point in the text would be fully
determined by what they typed. Now although as a reader you do not know what
initial common ground has been assumed when you read a text, you can
reason for any particular choice of initial conditions how the common
ground would evolve. And this leads me to a suggestion for how a
dynamic semantics like that given for Presupposition Logic could be used to
understand a text without presupposition failure being problematic.
Begin the reading process by imagining an infinite number of monkeys
(or as many as you can manage) with an infinite number of assumed initial common
grounds, your goal being to find out which monkey wrote the text. As you read,
separately update each of these contexts. At various stages presuppositional
constructions will be encountered, and these are what sort out the
wheat from the chaff. For whenever something is presupposed which has
not been explicitly introduced earlier in the text, a number of
monkeys drop out of contention, and there remain only those monkeys
for which the assumed common ground corresponding to that point in the
text satisfies the presupposition. In general, this process may not
tell you exactly which monkey was responsible, but it will at least
limit the options, and it will simultaneously tell you quite a lot
about what information you were intended to have after reading the
text.
The many-monkeys strategy can easily be formalised in terms of the
semantics of Presupposition Logic. A reader's information state is
identified with a set of contexts --- I will use the term {\em
information set} --- and is thus a subset of the powerset of
worlds. A state can be updated with a formula
by updating each of the member contexts separately, so producing the following
definition of the update of a state $I$\ with a formula $\phi$:
\begin{defn}{Updating Information Sets}
\begin{eqnarray*}
I + \phi = \{\tau\mid\exists\sigma\in I\;\sigma\sem{\phi}{}\tau\}
\end{eqnarray*}
\end{defn}
By definition, let us say that an information set
satisfies a formula only if its member contexts satisfy the formula:
\begin{defn}{Satisfaction by an Information Set}
\begin{eqnarray*}
I\models\phi & \ifof & \forall \sigma\in I\;\sigma\models\phi
\end{eqnarray*}
\end{defn}
The earlier notion of entailment could easily be defined in terms of
the new notion of information, as the following fact demonstrates.
(Here ${\cal P}(W)$\ is the powerset of the set of worlds $W$.)
\begin{fact}
\begin{eqnarray*}
\phi\models\psi & \ifof & ({\cal P}(W) +\phi)\models\psi\\
& \ifof & \forall I\; (I+\phi)\models\psi
\end{eqnarray*}
\end{fact}
\section{The Sophisticated Reader}
The naive reader might imagine an infinite number of monkeys, and use
only information from the text to help find out which monkey is the
author. But other information is available, if not of an absolute
character. We cannot initially say of any given proposal as to the
assumed common ground that it is impossible, and to this extent it is
necessary to consider all possibilities. But we can say that some
proposals are relatively more plausible than others. The sophisticated
reader considers what assumption the author is {\em likely} to have
made as to the initial common ground.
The
model I will now propose will be of sufficient generality that the
particular method of guessing at the author's assumptions will not be
important here. The assumptions of the author, whatever they are, determine
a Presupposition Logic context, a set of worlds. A reader's knowledge
of which assumptions are most plausible determines an ordering over
these contexts, what I will call a {\em plausibility ordering}. A
plausibility ordering relative to some model is a reflexive,
transitive binary relation over a subset of the powerset of the set of
worlds. For an ordering $\pi$,
$\sigma\geq_{\pi}\tau$\ is written for $\tup{\sigma,\tau}\in\pi$, and $\sigma
>_{\pi}\tau$\ is taken to mean that both $\sigma\geq_{\pi}\tau$\ and
$\tau\not\geq_{\pi}\sigma$. An ordering $\pi$\ can be updated with a
new formula by considering every pair in the ordering, and updating
each element of the pair separately according to the principles of
Presupposition Logic. The following definition is obtained:
\begin{defn}{Updating Plausibility Orderings}
\begin{eqnarray*}
\pi + \phi & = &
\{\tup{\sigma,\tau}\mid\exists\tup{\sigma',\tau'}\in\pi\;
\sigma'\sem{\phi}{}\sigma\;\En\;
\tau'\sem{\phi}{}\tau\}
\end{eqnarray*}
\end{defn}
Under this definition, certain contexts may drop out of contention in
the update process, just as with the naive updating process considered
earlier. An example may clarify. Suppose that
$\sem{\phi}{}\;=\;\{\tup{\sigma,\sigma'},\tup{\upsilon,\upsilon'}\}$,
and that we wished to update the ordering
$\pi\;=\;\{\tup{\sigma,\sigma},\tup{\tau,\tau},\tup{\upsilon,\upsilon},
\tup{\sigma,\tau}, \tup{\sigma,\upsilon},\tup{\tau,\upsilon}\}$\ with
$\phi$. Returning to the earlier metaphor, the reader is considering
three different {\em movies} that the writer-director might have
intended, and at the current point in the text the candidates for the
correct frame are $\sigma$, $\tau$\ and $\upsilon$, with a
plausibility ordering $\sigma\geq_{\pi}\tau\geq_{\pi}\upsilon$. The
reader should now verify that $\pi + \phi\;=\;
\{\tup{\sigma',\sigma'}, \tup{\upsilon',\upsilon'},
\tup{\sigma,\upsilon}\}$. Observe that since $\tau$\ cannot be updated
with $\phi$, there is no next frame from the film containing $\tau$\
in the new ordering, and we are left with only two candidate films,
with current frames $\sigma'$\ and $\upsilon'$\ ordered
$\sigma'\geq_{\pi+\phi}\upsilon'$. So the fact that frames $\sigma$\
and $\upsilon$\ were in a certain ordering relation means that the
next frames in those films are in the corresponding ordering relation
after update. A more sophisticated model would perhaps allow juggling
of orderings in the update process, to allow for what Grice-style
conversational analysis might tell us about the author's knowledge and
intentions.
Before considering how we might make use of plausibility orderings, let us
see how the earlier notion of entailment could be defined in terms of them.
The {\em domain} of an ordering $\pi$, written $\star\pi$, can be defined as the
set of contexts which are at least as plausible as themselves in the ordering,
and this allows retrieval from a plausibility ordering of a corresponding
information set. This in turn permits the definition of a notion of satisfaction
of a formula by a plausibility ordering in terms of the earlier notion of
satisfaction by an information set:
\begin{defn}{Domain of an ordering and `Ordinary' Satisfaction}
\begin{eqnarray*}
\star\pi & = & \{\sigma\mid \sigma\geq_{\pi}\sigma\}\\
\pi\models\phi & \ifof & \star\pi\models\phi
\end{eqnarray*}
\end{defn}
Given such a notion of satisfaction, it should be clear that it would be
straightforward to define a notion of entailment equivalent to that
given earlier. However, it is also possible to define alternative
notions of entailment relative to any given plausibility ordering. Let us say
that
the set of {\em preferred contexts} in an ordering $\pi$, written $\uparrow\pi$,
is
the set of all contexts which are at least as plausible as any context in the
ordering. Then we can say that an ordering $\pi$\ {\em preferentially satisfies}
a
formula
$\phi$, written $\pi\rhd\phi$, if the set of preferred contexts in $\pi$\
satisfies
$\phi$. Preferential satisfaction is a weaker notion than satisfaction, in
that an ordering may preferentially satisfy more formulae than it
satisfies. We may now say that a formula $\phi$\ preferentially entails a
formula $\psi$\ relative to an ordering $\pi$, written $\phi\rhd_{\pi}\psi$,
if updating $\pi$\ with $\phi$\ produces an ordering which preferentially
satisfies $\psi$. Here are the formal definitions:
\begin{defn}{Preferential Satisfaction and Entailment}
\begin{eqnarray*}
\uparrow\pi & = & \{\sigma\mid\forall \tau\in\star\pi\;\sigma\geq_{\pi} \tau\}\\
\pi\rhd\phi & \ifof & \uparrow\pi\models\phi\\
\phi\rhd_{\pi}\psi & \ifof & \pi + \phi \rhd \psi
\end{eqnarray*}
\end{defn}
In the case of a trivial ordering consisting of the cross-product of the
powerset of worlds $\pi_{0} \;=\; {\cal P}\times{\cal P}$, for which every set
of worlds is at least as plausible as every other set of worlds, this notion
collapses into the earlier entailment:
\begin{fact}
\begin{eqnarray*}
\phi\models\psi & \ifof & \phi\rhd_{\pi_{0}}\psi
\end{eqnarray*}
\end{fact}
Consider the following pair of sentences:
\begin{description}
\item[1] \label{eg:janetakes}{\sf If Jane takes a bath, Bill will be annoyed
that there is no more hot water.}
\item[2] \label{eg:janewants}{\sf If Jane wants a bath, Bill will be annoyed
that there is no more hot water.}
\end{description}
An utterance of (1) does not suggest to me that there actually is no more
hot water, but only that if Jane takes a bath, there will be no more
hot water. On the other hand, (2) suggests strongly that there is no
more hot water. Put another way, (1) is compatible with the standard
CCP prediction of a conditional reading, but (2) is not.
The current theory will predict the contrast
provided the following plausibility assumptions hold:
\begin{itemize}
\item At least one alternative in which it is established that there is no
hot water is more plausible than all alternatives in which it is not known
whether there is hot water, but in which it is known that if Jane wants a
bath then there will be no hot water.
\item An alternative in which it is not known whether or not there is
hot water but in which it is established that if Jane has a bath then
there will be no more hot water must be at least as plausible as all
alternatives where it is definitely established that there is no hot
water.
\end{itemize}
The general question is, why would it be reasonable to expect
plausibility orderings to have such properties? My answer to this
question is on the one hand both simple and obvious, and on the other
hand both awkward to implement and incompatible with many contemporary
theories of presupposition. Many linguists will surely find it
unpalatable. The answer is: common sense.
Let me expand on this. The contrast between (1) and
(2) results from our ability to find a common-sensical
explanation of the lack of hot water in terms of somebody having taken
a bath, combined with our inability to fully explain a lack of hot water in
terms of somebody simply wanting a bath. The simple assumption that
there is a finite amount of relevant hot water --- about a bathful ---
is sufficient to allow justification of there being no more hot water
in situations where Jane has just taken a bath. However, the same
simple assumption would not suffice in the case of (2),
and a number of other assumptions would be needed, such as the
assumption that if Jane wants a bath then she will definitely take
one. Thus it is the relative plausibility of assumptions not
explicitly mentioned in the text of the example sentences that
determines what is implicated.
Let us see how some of this analysis of (1) and (2) may be crudely
formalised. For expositional purposes, I will ignore many obviously relevant
issues, such as temporal connections between antecedent and consequent
clauses in the conditionals. Let us represent ``Jane takes a bath'' as ``{\sc
jtb}'', ``Jane wants a bath'' as ``{\sc jwb}'', ``there is no hot water'' as
``{\sc nhw}'', and ``Bill will be annoyed that there is no more hot water'' as
``$\partial${\sc nhw}$\;\En\;${\sc ba}''. Now suppose that our common
sense knowledge of the relative plausibility of different assumptions is
encoded in a plausibility ordering $\pi$. The two conditions required of
$\pi$\ may be formalised as follows:
\begin{itemize}
\item $\begin{array}{l}
\exists \sigma\in\pi\;\sigma\models\mbox{\sc nhw}\mbox{\ and\ }\\
\forall \tau\in\pi \mbox{\ if\ } (\tau\not\models\mbox{\sc nhw}\mbox{\ and\ }
\tau\models\;\mbox{\sc jwb}\Implic\mbox{\sc nhw})
\mbox{\ then\ }\sigma >_{\pi} \tau
\end{array}$
\item $\begin{array}{l}
\exists \sigma\in\pi\;\sigma\not\models\mbox{\sc nhw}\mbox{\ and\ }
\sigma\models\;\mbox{\sc jtb}\Implic\mbox{\sc nhw}\mbox{\ and\ }\\
\forall \tau\in\pi \mbox{\ if\ } \tau\models\mbox{\sc nhw}
\mbox{\ then\ }\sigma \geq_{\pi} \tau
\end{array}$
\end{itemize}
If $\pi$\ conforms to these requirements, then we have the following
preferential entailments:
\begin{eqnarray*}
\mbox{\sc jwb}\Implic(\partial\mbox{\sc nhw}\;\En\;\mbox{\sc ba})
& \rhd_{\pi} & \mbox{\sc nhw}\\
\mbox{\sc jtb}\Implic(\partial\mbox{\sc nhw}\;\En\;\mbox{\sc ba})
& \not\!\!\rhd_{\pi} & \mbox{\sc nhw}\\
\mbox{\sc jtb}\Implic(\partial\mbox{\sc nhw}\;\En\;\mbox{\sc ba})
& \rhd_{\pi} & \mbox{\sc jtb}\Implic\mbox{\sc nhw}
\end{eqnarray*}
In other words, with respect to $\pi$, (2) preferentially
entails that there is no more hot water, whereas (1) preferentially
entails not that there is no more hot water, but only that if Jane takes a
bath then there is no more hot water.
\section{Discussion}
The theory here developed provides
a formal characterisation of what Lewis~\cite{lewis} called {\em
accommodation}.
But in its current form the model differs markedly from existing
proposals, in that most writers have taken accommodation to be some
sort of repair strategy, something that happens when the
interpretation process goes wrong. Lewis seems to picture
accommodation as a covert adjustment of what he calls the {\em
conversational score}, a sort of creative accounting needed to make
conversational ends meet. Van der Sandt's accommodation, to take a
more recent example, is a sophisticated cut-and-paste operation on
discourse representation structures. Thus accommodation has been
viewed as an essentially non-monotonic operation, overwriting our
previous record of what had happened in a discourse to fit with new
demands. The view espoused in this paper has been quite the contrary.
Accommodation is seen as a {\em monotonic} operation, in the sense
that it does not replace or destructively revise our information about
a speaker or author, but further instantiates our knowledge, reducing
the range of possibilities for what the speaker was assuming.
Yet this monotonicity is not crucial; it is merely symptomatic of the
differences between the approach I have espoused and others.
The main claim I will make is that when we accommodate,
we look not only at the record of what has been said, but also
look behind what has been said, and consider explicitly what the author might
have
intended and what the author might have expected. This sort of reasoning
may be called {\em reconstructed reasoning} in the sense that it involves a
reconstruction of the speaker's assumptions and intentions. In the context of
existing theories of presupposition, the claim that an empirically adequate
account of presupposition must take reconstructed
reasoning into account appears radical. Yet the model proposed is intended to be
{\em conservative}, in that an existing line of research
(i.e. the Karttunen/Stalnaker/Lewis/Heim dynamic treatment of presupposition)
is used as the basis of a model which incorporates this sort of reasoning
process.
In the full paper I will show in more detail why the failure of existing
theories
of presupposition to take this sort of reconstructed reasoning into account
makes
them necessarily inadequate empirically, and how the success of the current
model
in dealing with examples (1) and (2) above is generalised to a large class of
traditionally problematic linguistic examples (e.g. those involving {\em
conditionalised presuppositions}) drawn from the literature.
{\small
\begin{thebibliography}{Beaver to appear}
\bibitem[Bea92]{beaver:kpl}Beaver, D., {\em The Kinematics of
Presupposition}, in Proceedings
of the Eighth Amsterdam Colloquium, ILLC, University of Amsterdam
%
\bibitem[Bea93a]{beaver:kmg} `Kinematic Montague Grammar', in Kamp, H.
(ed.), {\sc dyana-2} deliverable R2.2a, University of Amsterdam
%
\bibitem[Bea93b]{beaver:thesis}Beaver, D.,
{\em What Comes First in Dynamic Semantics}, {\sc illc} report LP-93-15,
University of Amsterdam.
%
\bibitem[Bea94]{beaver:salt}Beaver, D.,
``When Variables Don't Vary Enough'', in Harvey, M. \& L.~Santelmann (eds.),
{\em SALT 4}, Cornell
%
\bibitem[Bea95]{beaver:official}Beaver, D.,
{\em Presupposition and Assertion in Dynamic Semantics}, PhD Dissertation,
University of Edinburgh.
%
\bibitem[BeaMS]{beaver:handbook}Beaver, D.,
``Presupposition'', to appear in van Benthem, J. and A.~ter Meulen (eds.),
{\em The Handbook of Logic and Linguistics}, Elsevier
%
\bibitem[Gaz79]{gazdar}Gazdar, G., {\em Pragmatics: Implicature,
Presupposition and Logical Form}, Academic Press, New York
%
\bibitem[GS91]{gs:dpl}
Groenendijk, J. and M. Stokhof, {\em Dynamic Predicate
Logic}, in Linguistics and
Philosophy 14(1), 1991, 39--100.
%
\bibitem[Hei82]{heim:thesis}Heim, I., {\em On the
Semantics of Definite and Indefinite Noun Phrases}, PhD dissertation,
University of Massachusetts, Amherst.
%
\bibitem[Hei83]{heim:83}Heim, I., {\em On the Projection Problem for
Presuppositions}, in {\em WCCFL '83}
%
\bibitem[Kam81]{kamp:drt}Kamp, H., `A Theory of Truth and Semantic
Representation', in Groenendijk, Janssen \& Stokhof (eds.) {\em
Formal Methods in the Study of Language}
%
\bibitem[Kar74]{kart:74}Karttunen, L., `Presuppositions and Linguistic
Context', Theoretical Linguistics~1
%
\bibitem[LS71]{ls} Langendoen, D. and H. Savin, `The Projection
Problem for Presuppositions', in Fillmore \& Langendoen (eds.), {\em
Studies in
Linguistic Semantics}, Holt, Rinehart \& Winston, New York
%
\bibitem[Lew79]{lewis}Lewis, D., `Scorekeeping in a
Language Game', in B\"{a}uerle et al.\ (eds.), {\em Semantics from
Different Points of View}, Berlin
%
\bibitem[Mer92]{mercer}Mercer, R., {\em Default Logic: Towards a
Common Logical Semantics for Presupposition and Entailment}, Journal of
Semantics 9:3
%
\bibitem[vdS88]{vds:cap}van der Sandt, R., {\em Context and
Presupposition}, Croom Helm, London
%
\bibitem[vdS92]{vds}van der Sandt, R., {\em Presupposition Projection
as Anaphora Resolution}, Journal of Semantics 9:4
%
\bibitem[Sta74]{sta}Stalnaker, R., `Pragmatic
Presuppositions', in Munitz
\& Unger (eds.), {\em Semantics and Philosophy}, New York University Press
%
\bibitem[Vel91]{veltman:us} Veltman, F., {\em Defaults in
update semantics}, in {\sc dyana} deliverable R2.5.C, (also: to appear
Journal of Philosophical Logic)
%
\end{thebibliography}}
\end{document}