# davewww.bib

@inproceedings{DelTedesco+:ICISS11,
author = {F. {Del Tedesco} and S. Hunt and David Sands},
title = {A Semantic Hierarchy for Erasure Policies},
booktitle = {Seventh International Conference on Information Systems Security},
year = 2011,
xseries = {LNCS},
publisher = {Springer Verlag},
pdf = {http://www.cse.chalmers.se/~dave/papers/ICISS11.pdf},
abstract = {We consider the problem of logical data erasure,
contrasting with physical erasure in the same way
that end-to-end information flow control contrasts
with access control.  We present a semantic
hierarchy for erasure policies, using a
possibilistic knowledge-based semantics to define
policy satisfaction such that there is an
intuitively clear upper bound on what information an
erasure policy permits to be retained. Our hierarchy
allows a rich class of erasure policies to be
expressed, taking account of the power of the
attacker, how much information may be retained, and
under what conditions it may be retained.  While our
main aim is to specify erasure policies, the
semantic framework allows quite general
information-flow policies to be formulated for a
variety of semantic notions of secrecy.  }
}

@inproceedings{Mantel+:CSF11,
author = {Heiko Mantel and David Sands and Henning Sudbrock},
title = {Assumptions and Guarantees for Compositional
Noninterference},
booktitle = {Proceedings of the 24th IEEE Computer Security Foundations
Symposium},
publisher = {IEEE Computer Society},
year = 2011,
pages = {218--232},
pdf = {http://www.cse.chalmers.se/~dave/papers/CSF2011.pdf},
abstract = {The idea of building secure systems by plugging together
``secure'' components is appealing, but this
requires a definition of security which, in addition
to taking care of top-level security goals, is
strengthened appropriately in order to be
compositional.  This approach has been previously
studied for information-flow security of
shared-variable concurrent programs, but the price
for compositionality is very high: a thread must be
extremely pessimistic about what an environment
might do with shared resources.  This pessimism
leads to many intuitively secure threads being
labelled as insecure.  Since in practice it is only
meaningful to compose threads which follow some
protocol for data access, we take advantage of this
to develop a more liberal compositional security
condition.  The idea is to give the security
definition access to the intended pattern of data
usage, as expressed by assumption-guarantee style
conditions associated with each thread.  We
illustrate the improved precision by developing the
first flow-sensitive security type system that
provably enforces a noninterference-like property
for concurrent programs.  }
}

@inproceedings{Hunt:Sands:ESOP11,
author = {S. Hunt and David Sands},
title = {From Exponential to Polynomial-time Security Typing
via Principal Types},
booktitle = {Programming Languages and Systems. 20th European Symposium on Programming, ESOP 2011},
xpages = {},
year = {2011},
number = {6602},
series = {LNCS},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hunt-Sands-ESOP11.pdf},
publisher = {Springer Verlag},
abstract = {Hunt and Sands (POPL'06) studied a flow sensitive type
(FST) system for multi-level security, parametric in
the choice of lattice of security levels.  Choosing
the powerset of program variables as the security
lattice yields a system which was shown to be
equivalent to Amtoft and Banerjee's Hoare-style
independence logic (SAS'04). Moreover, using the
powerset lattice, it was shown how to derive a
principal type from which all other types (for all
choices of lattice) can be simply derived. Both of
these earlier works gave "algorithmic" formulations
of the type system/program logic, but both
algorithms are of exponential complexity due to the
iterative typing of While loops. Later work by Hunt
and Sands (ESOP'08) adapted the FST system to
provide an erasure type system which determines
whether some input is correctly erased at a
designated time. This type system is inherently
exponential, requiring a double typing of the
erasure-labelled input command. In this paper we
start by developing the FST work in two key ways:
(1) We specialise the FST system to a form which
only derives principal types; the resulting type
system has a simple algorithmic reading, yielding
principal security types in polynomial time. (2) We
show how the FST system can be simply extended to
check for various degrees of termination sensitivity
(the original FST system is completely termination
insensitive, while the erasure type system is fully
termination sensitive). We go on to demonstrate the
power of these techniques by combining them to
develop a type system which is shown to correctly
implement erasure typing in polynomial
time. Principality is used in an essential way to
reduce type derivation size from exponential to
linear.}
}

@inproceedings{DelTedesco+:Implementing,
author = {Filippo {Del Tedesco} and Alejandro Russo and David Sands},
title = {Implementing Erasure Policies Using Taint Analysis},
abstract = {Security- or privacy-critical applications often require access to
sensitive information in order to function. But in accordance with
the principle of least privilege -- or perhaps simply for legal
compliance -- such applications should not retain said information once
it has served its purpose. In such scenarios, the timely disposal of data
is known as an \emph{information erasure policy}.
This paper studies software-level
information erasure policies for the data manipulated by programs.
The paper presents a new approach to the enforcement of such policies.
We adapt ideas from dynamic taint analysis to track how sensitive data sources propagate through a program and erase them on demand. The method is implemented for Python as a library, with no modifications to the runtime system.
The library is easy to use, and allows programmers to indicate information-erasure policies with only
minor modifications to their code.
},
optcrossref = {},
optkey = {},
booktitle = {The 15th Nordic Conference in Secure IT Systems},
optpages = {},
year = {2010},
editor = {Tuomas Aura},
optvolume = {},
optnumber = {},
series = {LNCS},
month = oct,
optorganization = {},
publisher = {Springer Verlag},
optnote = {},
pdf = {http://www.cse.chalmers.se/~dave/papers/erasureTaint.pdf}
}

@inproceedings{Magazinius+:Safe,
author = {Jonas Magazinius and Phu H. Phung and David Sands},
title = {Safe Wrappers and Sane Policies for Self Protecting {J}ava{S}cript},
abstract = { Phung \emph{et al} (ASIACCS'09) describe a method for wrapping built-in
methods of JavaScript programs in order to enforce security
policies. The method is appealing because it requires neither deep
transformation of the code nor browser modification. Unfortunately
the implementation outlined suffers from a range of vulnerabilities,
and policy construction is restrictive and error prone. In this paper we address these issues to provide a systematic way to avoid the identified vulnerabilities, and make it easier for the policy writer to construct declarative
policies -- i.e. policies upon which attacker code has no side effects.
},
booktitle = {The 15th Nordic Conference in Secure IT Systems},
optpages = {},
year = {2010},
editor = {Tuomas Aura},
optvolume = {},
optnumber = {},
series = {LNCS},
month = oct,
optorganization = {},
publisher = {Springer Verlag},
note = {(Selected papers from AppSec 2010)},
pdf = {http://www.cse.chalmers.se/~dave/papers/SafeWrappers.pdf}
}

@inproceedings{Broberg:Sands:POPL10,
author = {Niklas Broberg and David Sands},
title = {Paralocks -- Role-Based Information Flow Control and Beyond},
booktitle = {POPL'10, Proceedings of the 37th Annual ACM SIGACT-SIGPLAN Symposium on Principles of Programming Languages},
year = 2010,
abstract = {This paper presents Paralocks, a language for building expressive
but statically verifiable fine-grained information flow policies.
Paralocks combine the expressive power of Flow Locks (Broberg \& Sands,
ESOP'06) with the ability to express policies involving run-time
principles, roles (in the style of role-based access control), and
relations (such as ``acts-for'' in discretionary access control).  We
illustrate the Paralocks policy language by giving a simple encoding
of Myers and Liskov's Decentralized Label Model (DLM). Furthermore --
and unlike the DLM -- we provide an information flow semantics for full
Paralock policies.  Lastly we illustrate how Paralocks can be
statically verified by providing a simple programming language
incorporating Paralock policy specifications, and a static type system which
soundly enforces information flow security according to the Paralock
semantics.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Broberg-Sands-POPL10.pdf}
}

@inproceedings{Svenningsson:Sands:FAST09,
author = {Josef Svenningsson and David Sands},
title = {Specification and Verification of Side Channel Declassification},
booktitle = {The sixth International Workshop on Formal Aspects in Security and Trust (FAST2009) },
optpages = {},
year = {2009},
opteditor = {},
optvolume = {},
optnumber = {},
optseries = {},
month = nov,
optorganization = {},
series = {LNCS},
publisher = {Springer},
note = {Pre-proceedings version, plus appendix.},
pdf = {http://www.cse.chalmers.se/~dave/papers/SS-FAST09.pdf},
abstract = {Side channel attacks have emerged as a serious threat to the
security of both networked and embedded systems -- in particular through
the implementations of cryptographic operations. Side channels can
be difficult to model formally, but with careful coding and program
transformation techniques it may be possible to verify security in
the presence of specific side-channel attacks. But what if a program
intentionally makes a tradeoff between security and efficiency and
leaks some information through a side channel? In this paper we
study such tradeoffs using ideas from recent research on
declassification. We present a semantic model of security for
programs which allow for declassification through side channels, and
show how side-channel declassification can be verified using
off-the-shelf software model checking tools. Finally, to make it
simpler for verifiers to check that a program conforms to a
particular side-channel declassification policy we introduce a
further tradeoff between efficiency and verifiability: by writing programs
in a particular ``manifest form'' security becomes considerably easier to
verify.}
}

@inproceedings{DelTedesco:Sands:Secco09,
author = {Filippo {Del Tedesco} and David Sands},
title = {A User Model for Information Erasure},
optcrossref = {},
optkey = {},
booktitle = {SecCo'09, 7th International Workshop on Security Issues in Concurrency},
optpages = {},
year = {2009},
opteditor = {},
optvolume = {},
optnumber = {},
series = {Electronic Proceedings in Theoretical Computer Science},
optmonth = {},
optorganization = {},
optpublisher = {},
note = {To appear},
abstract = { Hunt and Sands (ESOP'08) studied a notion of \emph{information
erasure} for systems which receive secrets intended for
limited-time use. Erasure demands that once a secret has fulfilled
its purpose the subsequent behaviour of the system should reveal
no information about the erased data. In this paper we address a
shortcoming in that work: for erasure to be possible the user who
provides data must also play his part, but previously that role was
only specified informally. Here we provide a formal model of the
user and a collection of requirements called \emph{erasure
friendliness}. We prove that an erasure-friendly user can be
composed with an erasing system (in the sense of Hunt and Sands) to
obtain a combined system which is \emph{jointly erasing} in an
appropriate sense. In doing so we identify stronger requirements on the user than those informally described in the previous work. },
pdf = {http://www.cse.chalmers.se/~dave/papers/DelTedescoSandsSecCo09.pdf}
}

@inproceedings{Broberg:Sands:PLAS09,
author = {Niklas Broberg and David Sands},
title = {Flow-Sensitive Semantics for Dynamic Information Flow Policies},
booktitle = {ACM SIGPLAN Fourth Workshop on
Programming Languages and Analysis for Security (PLAS 2009)},
year = 2009,
editor = {S. Chong and D. Naumann},
month = jun,
publisher = {ACM},
abstract = {Dynamic information flow policies, such as declassification,
are essential for practically useful information flow control systems.
However, most systems proposed to date that handle dynamic information
flow policies suffer from a common drawback. They build on semantic models of security which are inherently flow
insensitive, which means that many simple intuitively secure programs
will be considered insecure.

In this paper we address this problem in the context of a particular
system, flow locks. We provide a new flow sensitive semantics for flow
locks based on a knowledge-style definition (following Askarov and
Sabelfeld), in which the knowledge gained by an actor
observing a program run is constrained according to the flow locks
which are open at the time each observation is made. We demonstrate
the applicability of the definition in a soundness proof for a simple
flow lock type system. We also show how other systems can be encoded
using flow locks, as an easy means to provide these systems with flow
sensitive semantics.},
pdf = {http://www.cse.chalmers.se/~dave/papers/BrobergSandsPLAS09.pdf}
}

@inproceedings{Phung:Sands:Chudnov:ASIACCS09,
author = {Phu H. Phung and David Sands and Andrey Chudnov},
title = {Lightweight Self-Protecting {J}ava{S}cript},
booktitle = {ACM Symposium on Information, Computer and Communications Security (ASIACCS 2009)},
optpages = {},
year = {2009},
editor = {R. Safavi-Naini and V. Varadharajan},
month = mar,
publisher = {ACM Press},
pdf = {http://www.cse.chalmers.se/~dave/papers/ASIACCS09.pdf},
abstract = { This paper introduces a method to control
JavaScript execution. The aim is to prevent or
modify inappropriate behaviour caused by
e.g. malicious injected scripts or poorly designed
third-party code. The approach is based on modifying
the code so as to make it self-protecting: the
protection mechanism (security policy) is embedded
into the code itself and intercepts security
relevant API calls. The challenges come from the
nature of the JavaScript language: any variables in
the scope of the program can be redefined, and code
can be created and run on-the-fly. This creates
potential problems, respectively, for
tamper-proofing the protection mechanism, and for
ensuring that no security relevant events bypass the
protection.  Unlike previous approaches to
instrument and monitor JavaScript to enforce or
adjust behaviour, the solution we propose is
\emph{lightweight} in that (i) it does not require a
modified browser, and (ii) it does not require any
run-time parsing and transformation of code
(including dynamically generated code). As a result,
the method has low run-time overhead compared to
other methods satisfying (i), and the lack of need
for browser modifications means that the policy can
even be applied on the server to mitigate some
effects of cross-site scripting bugs.
}
}

@techreport{Demange:Sands:ESOPfull,
author = {D. Demange and David Sands},
title = {All Secrets Great and Small},
institution = {Computer Science and Engineering,
Chalmers University of Technology},
number = {2009-01},
year = {2009},
pdf = {http://www.cse.chalmers.se/~dave/papers/Demange-Sands-TR09-1.pdf},
month = apr,
note = {Extended version of \cite{Demange:Sands:ESOP09}},
optannote = {}
}

@inproceedings{Demange:Sands:ESOP09,
author = {D. Demange and David Sands},
title = {All Secrets Great and Small},
booktitle = {Programming Languages and Systems. 18th European Symposium on Programming, ESOP 2009},
pages = {207--221},
year = {2009},
number = {5502},
series = {LNCS},
pdf = {http://www.cse.chalmers.se/~dave/papers/DemangeSandsESOP09.pdf},
publisher = {Springer Verlag},
abstract = {
Tools for analysing secure information flow are almost exclusively
based on ideas going back to Denning's work from the 70's. This
approach embodies an imperfect notion of security which turns a blind
eye to information flows which are encoded in the termination
behaviour of a program. In exchange for this weakness many more
programs are deemed ``secure'', using conditions which are easy to
check. Previously it was thought that such leaks are limited to at
most one bit per run. Recent work by Askarov et al (ESORICS'08) offers
some bad news and some good news: the bad news is that for programs
which perform output, the amount of information leaked by a Denning
style analysis is not bounded; the good news is that if secrets are
chosen to be sufficiently large and sufficiently random then they cannot be
effectively leaked at all. The problem addressed in this paper is that
secrets cannot always be made sufficiently large or sufficiently
random. Contrast, for example, an encryption key with an
``hasHIV''-field of a patient record. In recognition of this we develop
a notion of \emph{secret-sensitive noninterference} in which ``small'' secrets
are handled more carefully than ``big'' ones.  We illustrate the idea
with a type system which combines a liberal Denning-style analysis
with a more restrictive system according to the nature of the secrets
at hand.
}
}

@inproceedings{Askarov+:ESORICS08,
title = {Termination-Insensitive Noninterference Leaks More Than Just a Bit},
author = {Askarov, A. and Hunt, S. and Sabelfeld, A. and Sands, D.},
booktitle = {The 13th European Symposium on Research in Computer Security (ESORICS 08), Malaga, Spain, October 6-8, 2008. Proceedings},
number = {5283},
series = {LNCS},
pdf = {http://www.cse.chalmers.se/~andrei/esorics08.pdf},
publisher = {Springer Verlag},
abstract = { Current tools for analysing information flow in programs
build upon ideas going back to Denning's work from
the 70's. These systems enforce an imperfect notion
of information flow which has become known as
termination-insensitive noninterference. Under this
version of noninterference, information leaks are
permitted if they are transmitted purely by the
program's termination behaviour (i.e., whether it
terminates or not). This imperfection is the price
to pay for having a security condition which is
relatively liberal (e.g. allowing while-loops whose
termination may depend on the value of a secret) and
easy to check. But what is the price exactly? We
argue that, in the presence of output, the price is
higher than the ``one bit'' often claimed informally
in the literature, and effectively such programs can
leak all of their secrets. In this paper we develop
a definition of termination-insensitive noninterference
for programs with outputs. We show that the definition
generalises ``batch-job'' style definitions from the
literature and that it is indeed satisfied by a
Denning-style program analysis with output. Although
more than a bit of information can be leaked by
programs satisfying this condition, we show that the
best an attacker can do is a brute-force attack,
which means that the attacker cannot reliably (in a
technical sense) learn the secret in polynomial time
in the size of the secret. If we further assume that
secrets are uniformly distributed, we show that the
advantage the attacker gains when guessing the
secret after observing a polynomial amount of output
is negligible in the size of the secret.  },
pages = {333--348},
year = {2008}
}

@unpublished{Hunt:Sands:ESOP08-extended,
author = {S. Hunt and David Sands},
title = {Just Forget it -- The Semantics and Enforcement of Information Erasure},
note = {Extended version of \cite{Hunt:Sands:ESOP08} including proofs},
month = apr,
year = {2008},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hunt-Sands-ESOP08-extended.pdf},
optannote = {}
}

@inproceedings{Phung:Sands:COMPSAC08,
author = {P. H. Phung and David Sands},
title = {Security Policy Enforcement in the {OSGi} Framework Using Aspect-Oriented Programming},
booktitle = {Proceedings of the 32nd Annual IEEE International Computer
Software and Applications Conference, COMPSAC 2008, 28 July
-- 1 August 2008, Turku, Finland},
publisher = {IEEE Computer Society},
pages = {1076--1082},
year = {2008},
isbn = {978-0-7695-3262-2},
abstract = {
The lifecycle mismatch between vehicles and their IT system
poses a problem for the automotive industry. Such systems
need to be open and extensible to provide customised
functionalities and services. What is less clear is how to
achieve this with quality and security guarantees.
Recent studies in language-based security -- the use of
programming language technology to enforce application
specific security policies -- show that security policy enforcement
mechanisms such as inlined reference monitors
provide a potential solution for security in extensible systems.
In this paper we study the implementation of security
policy enforcement using aspect-oriented programming for
the OSGi (Open Services Gateway initiative) framework.
We identify classes of reference monitor-style policies that
can be defined and enforced using AspectJ, a well-known
aspect-oriented programming language. We demonstrate
the use of security states to describe history-based policies.
We also introduce and implement various levels of security
states in Java to describe session level history versus
global application level history. We illustrate the effectiveness
of the implementation by deploying the security policy
enforcement solution in an example scenario of software
download in vehicles.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/PhungSandsCOMPSAC08.pdf}
}

@inproceedings{Hunt:Sands:ESOP08,
author = {S. Hunt and David Sands},
title = {Just Forget it -- The Semantics and Enforcement of Information Erasure},
booktitle = {Programming Languages and Systems. 17th European Symposium on Programming, ESOP 2008},
pages = {239--253},
year = {2008},
number = {4960},
series = {LNCS},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hunt-Sands-ESOP08.pdf},
publisher = {Springer Verlag},
abstract = {There are many settings in which sensitive information is
made available to a system or organisation for a specific purpose,
on the understanding that it will be erased once that purpose has
been fulfilled.  A familiar example is that of online credit card
transactions: a customer typically provides credit card details to a
payment system on the understanding that the following promises are
kept: (i) Noninterference (NI): the card details may flow to the
bank (in order that the payment can be authorised) but not
to other users of the system; (ii) Erasure: the payment
system will not retain any record of the card details once the
transaction is complete.  This example shows that we need to reason
about NI and erasure in combination, and that we need to consider
interactive systems: the card details are used in the interaction
between the principals, and then erased; without the interaction,
the card details could be dispensed with altogether and erasure
would be unnecessary.  The contributions of this paper are as
follows.  (i) We show that an end-to-end erasure property can be
encoded as a ``flow sensitive'' noninterference property.  (ii) By a
judicious choice of language construct to support erasure policies,
we successfully adapt this result to an interactive setting.  (iii)
We use this result to design a type system which guarantees that
well typed programs are properly erasing.  Although erasure policies
have been discussed in earlier papers, this appears to be the first
static analysis to enforce erasure.  }
}

@article{Sabelfeld:Sands:JCS09,
author = {A. Sabelfeld and David Sands},
title = {Declassification: Dimensions and Principles},
journal = {Journal of Computer Security},
year = {2009},
volume = 17,
number = 5,
pages = {517--548},
pdf = {http://www.cse.chalmers.se/~dave/papers/sabelfeld-sands-jcs07.pdf},
publisher = {IOS Press},
abstract = {Computing systems often deliberately release (or
declassify) sensitive information. A principal
security concern for systems permitting information
release is whether this release is safe: is it
possible that the attacker compromises the
information release mechanism and extracts more
secret information than intended? While the security
community has recognised the importance of the
problem, the state-of-the-art in information release
is, unfortunately, a number of approaches with
somewhat unconnected semantic goals. We provide a
road map of the main directions of current research,
by classifying the basic goals according to what
information is released, who releases information,
where in the system information is released and when
information can be released. With a general
declassification framework as a long-term goal, we
identify some prudent principles of
declassification. These principles shed light on
existing definitions and may also serve as useful
``sanity checks'' for emerging models.}
}

@inproceedings{Hedin:Sands:CSFW06,
title = {Noninterference in the presence of non-opaque pointers},
author = {D. Hedin and David Sands},
booktitle = {Proceedings of the 19th IEEE Computer Security
Foundations Workshop},
year = {2006},
publisher = {IEEE Computer Society Press},
xpages = {255--269},
abstract = { A common theoretical assumption in the study of information flow
security in Java-like languages is that pointers are opaque -- i.e.,
that the only properties that can be observed of pointers are the
objects to which they point, and (at most) their equality. These
assumptions often fail in practice.  For example, various important
operations in Java's standard API, such as hashcodes or
serialization, might break pointer opacity.  As a result,
information-flow static analyses which assume pointer opacity risk
being unsound in practice, since the pointer representation provides
an unchecked implicit leak. We investigate information flow in the
presence of non-opaque pointers for an imperative language with
records, pointer instructions and exceptions, and develop an
information flow aware type system which guarantees noninterference.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hedin-Sands-CSFW06.pdf}
}

@techreport{Broberg:Sands:flowlocks,
author = {N. Broberg and David Sands},
title = {Flow Locks: Towards a core calculus for Dynamic Flow Policies},
institution = {Chalmers University of Technology and G{\"o}teborg University},
year = {2006},
pdf = {http://www.cse.chalmers.se/~dave/papers/Broberg-Sands-flowlocks-full.pdf},
month = may,
note = { Draft. Extended version of \cite{Broberg:Sands:ESOP06}},
optannote = {}
}

@inproceedings{Hunt:Sands:POPL06,
author = {S. Hunt and David Sands},
title = {On Flow-Sensitive Security Types},
booktitle = {POPL'06, Proceedings of the 33rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
year = 2006,
abstract = { This article investigates formal properties of a family of
semantically sound flow-sensitive type systems for tracking
information flow in simple While programs. The family is indexed by
the choice of flow lattice.
By choosing the flow lattice to be the powerset of program variables,
we obtain a system which, in a very strong sense, subsumes all other
systems in the family (in particular, for each program, it provides a
principal typing from which all others may be inferred).  This
distinguished system is shown to be equivalent to, though more simply
described than, Amtoft and Banerjee's Hoare-style independence logic
(SAS'04).
In general, some lattices are more expressive than others.
Despite this, we show that no type system in the family can give
better results for a given choice of lattice than the type system
for that lattice itself.
Finally, for any program typeable in one of these systems, we show
how to construct an equivalent program which is typeable in a simple
flow-insensitive system.  We argue that this general approach could
be useful in a proof-carrying-code setting.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hunt-Sands-POPL06.pdf},
month = jan
}

@inproceedings{Broberg:Sands:ESOP06,
author = {N. Broberg and David Sands},
title = {Flow Locks: Towards a core calculus for Dynamic Flow Policies},
booktitle = {Programming Languages and Systems. 15th European Symposium on Programming, ESOP 2006},
year = 2006,
volume = {3924},
series = {LNCS},
abstract = {Security is rarely a static notion. What is considered to be
confidential or untrusted data varies over time according to
changing events and states. The static verification of secure
information flow has been a popular theme in recent programming
language research, but information flow policies considered are
based on multilevel security which presents a static view of
security levels. In this paper we introduce a very simple mechanism
for specifying dynamic information flow policies, flow locks, which
specify conditions under which data may be read by a certain
actor. The interface between the policy and the code is via
instructions which open and close flow locks. We present a type and
effect system for an ML-like language with references which permits
the completely static verification of flow lock policies, and prove
that the system satisfies a semantic security property generalising
noninterference. We show that this simple mechanism can represent a
number of recently proposed information flow paradigms for
declassification.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Broberg-Sands-ESOP06.pdf},
publisher = {Springer Verlag}
}

@book{Axelsson:Sands:Understanding,
author = {S. Axelsson and David Sands},
alteditor = {},
title = {Understanding {I}ntrusion {D}etection through {V}isualization},
publisher = {Springer},
year = {2006},
optkey = {},
optvolume = {},
number = {24},
series = {Advances in Information Security},
url = {http://www.cse.chalmers.se/~dave/VisBook},
optnote = {},
optannote = {}
}

@inproceedings{Sabelfeld:Sands:CSFW05,
title = {Dimensions and Principles of Declassification},
author = {Andrei Sabelfeld and David Sands},
booktitle = {Proceedings of the 18th IEEE Computer Security
Foundations Workshop},
year = {2005},
publisher = {IEEE Computer Society Press},
pages = {255--269},
abstract = {  Computing systems often deliberately release (or declassify)
sensitive information. A principal security concern for systems
permitting information release is whether this release is safe: is
it possible that the attacker compromises the information release
mechanism and extracts more secret information than intended?
While the security community has recognised the importance of the
problem, the state-of-the-art in information release is,
unfortunately, a number of approaches with somewhat unconnected
semantic goals. We provide a road map of the main directions of
current research, by classifying the basic goals according to
\emph{what} information is released, \emph{who} releases
information, \emph{where} in the system information is released and
\emph{when} information can be released.
With a general declassification framework as a long-term goal, we
identify some prudent \emph{principles} of declassification. These
principles shed light on existing definitions and may also
serve as useful ``sanity checks'' for emerging models.},
pdf = {http://www.cse.chalmers.se/~dave/papers/sabelfeld-sands-CSFW05.pdf}
}

@inproceedings{Hedin:Sands:Timing,
author = {D. Hedin and David Sands},
title = {Timing Aware Information Flow Security for a
{J}ava{C}ard-like Bytecode},
booktitle = {First Workshop on Bytecode Semantics, Verification,
Analysis and Transformation (BYTECODE '05)},
year = 2005,
series = {Electronic Notes in Theoretical Computer Science (to
appear)},
abstract = {Common protection mechanisms fail to provide \emph{end-to-end} security;
programs with legitimate access to secret information are not prevented from
leaking this to the world. Information-flow aware analyses track the flow of
information through the program to prevent such leakages, but often ignore
information flows through \emph{covert channels} even though they pose a
serious threat.  A typical covert channel is to use the timing of certain
events to carry information.  We present a timing-aware information-flow type
system for a low-level language similar to a non-trivial subset of a sequential
Java bytecode. The type system is parameterized over the time model of the
instructions of the language and over the algorithm enforcing low-observational
equivalence, used in the prevention of implicit and timing flows.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hedin-Sands-BYTECODE05.pdf}
}

@inproceedings{DHS05,
author = {{\'A}. Darvas and R. H{\"{a}}hnle and David Sands},
title = {A Theorem Proving Approach to Analysis of Secure
Information Flow},
booktitle = {Proc.\ 2nd International Conference on Security in
Pervasive Computing},
pages = {193--209},
year = {2005},
editor = {Dieter Hutter and Markus Ullmann},
volume = {3450},
series = {LNCS},
publisher = {Springer-Verlag},
abstract = {Most attempts at analysing secure information flow in programs
are based on domain-specific logics. Though computationally feasible,
these approaches suffer from the need for abstraction and the high
cost of building dedicated tools for real programming languages. We recast
the information flow problem in a general program logic rather than
a problem-specific one. We investigate the feasibility of this approach by
showing how a general purpose tool for software verification can be used
to perform information flow analyses. We are able to prove security and
insecurity of programs including advanced features such as method calls,
loops, and object types for the target language Java Card. In addition,
we can express declassification of information.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Sands-SPC05.pdf}
}

@inproceedings{Mantel:Sands:APLAS04,
  author    = {H. Mantel and David Sands},
  title     = {Controlled Declassification based on Intransitive Noninterference},
  booktitle = {Proc. Asian Symp. on Programming Languages and Systems},
  series    = {LNCS},
  volume    = 3302,
  pages     = {129--145},
  month     = nov,
  year      = 2004,
  publisher = {Springer-Verlag},
  abstract  = {%
    Traditional noninterference cannot cope with common features of secure
    systems like channel control, information filtering, or explicit
    weaker security conditions that could support such features in a
    language-based setting.  However, a fully satisfactory solution to the
    problem has yet to be found.  A key problem is to permit exceptions to
    we propose an approach that draws its underlying ideas from
    \emph{intransitive noninterference}, a concept usually used on a more
    abstract specification level. Our results include a new
    bisimulation-based security condition that controls tightly where
    downgrading can occur and a sound security type system for checking
    this condition.},
  pdf       = {http://www.cse.chalmers.se/~dave/papers/Mantel-Sands-APLAS04.pdf}
}

@techreport{Mantel:Sands:TR04,
  author      = {H. Mantel and David Sands},
  title       = {Controlled Declassification based on Intransitive Noninterference},
  institution = {Chalmers},
  number      = {2004-06},
  month       = nov,
  year        = 2004,
  series      = {Technical Reports in Computing Science at Chalmers University of Technology and G\"oteborg University},
  note        = {Technical Reports in Computing Science at Chalmers University of Technology and G\"oteborg University.
                 Extended version of \cite{Mantel:Sands:APLAS04}},
  pdf         = {http://www.cse.chalmers.se/~dave/papers/Mantel-Sands-TR04.pdf}
}

@inproceedings{DHS03,
author = {{\'A}. Darvas and R. H{\"{a}}hnle and David Sands},
title = {A Theorem Proving Approach to Analysis of Secure Information
Flow (preliminary version)},
booktitle = {Workshop on Issues in the Theory of Security, WITS},
optpages = {},
note = {Subsumed by \cite{DHS05}},
year = {2003},
editor = {Roberto Gorrieri},
organization = {IFIP WG 1.7, ACM SIGPLAN and GI FoMSESS},
pdf = {http://www.cse.chalmers.se/~dave/papers/DHS-WITS03.pdf}
}

@article{MSC-SCP,
author = {A. Moran and David Sands and M. Carlsson},
title = {Erratic fudgets: a semantic theory for an embedded coordination language},
journal = {Science of Computer Programming},
volume = {46},
number = {1-2},
year = {2003},
issn = {0167-6423},
pages = {99--135},
doi = {10.1016/S0167-6423(02)00088-6},
publisher = {Elsevier North-Holland, Inc.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Moran-Sands-Carlsson-SCP.pdf},
abstract = {
The powerful abstraction mechanisms of functional programming languages
provide the means to develop domain-specific programming languages within the
language itself.  Typically, this is realised by designing a set of
combinators (higher-order reusable programs) for an application area, and by
constructing individual applications by combining and coordinating individual
combinators.  This paper is concerned with a successful example of such an
embedded programming language, namely Fudgets, a library of combinators for
building graphical user interfaces in the lazy functional language Haskell.
The Fudget library has been used to build a number of substantial
applications, including a web browser and a proof editor interface to a proof
checker for constructive type theory.  This paper develops a semantic theory
for the non-deterministic stream processors that are at the heart of the
Fudget concept.  The interaction of two features of stream processors makes the
development of such a semantic theory problematic:
(i)  the sharing of computation provided by the lazy evaluation mechanism
of the underlying host language, and
(ii) the addition of non-deterministic choice needed to handle the
natural concurrency that reactive applications entail.
We demonstrate that this combination of features in a higher-order functional
language can be tamed to provide a tractable semantics theory and induction
principles suitable for reasoning about contextual equivalence of Fudgets.}
}

@incollection{Sands:Lambda02,
author = {David Sands and J. Gustavsson and A. Moran},
title = {Lambda Calculi and Linear Speedups},
booktitle = {The Essence of Computation: Complexity, Analysis, Transformation. Essays Dedicated to Neil D. Jones},
publisher = {Springer Verlag},
year = 2002,
volume = 2566,
series = {LNCS},
abstract = {The equational theories at the core of most functional programming
are variations on the standard lambda calculus. The best known
of these is the call-by-value lambda calculus whose core is the
value-beta computation rule.
This paper investigates the transformational power of this core theory of
functional programming. The main result is that the equational theory
of the call-by-value lambda calculus cannot speed up (or slow down)
programs by more than a constant factor. The corresponding result also
holds for call-by-need but we show that it does not hold for call-by-name:
there are programs for which a single beta reduction can change
the program's asymptotic complexity.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Sands-Gustavsson-Moran.pdf}
}

@proceedings{Sands:2001:PLS,
  editor    = {David Sands},
  title     = {Programming languages and systems: 10th European
               Symposium on Programming, {ESOP} 2001, held as part of
               the Joint European Conferences on Theory and Practice
               of Software, {ETAPS} 2001, Genova, Italy, April 2--6,
               2001: proceedings},
  booktitle = {Programming languages and systems: 10th European
               Symposium on Programming, {ESOP} 2001, held as part of
               the Joint European Conferences on Theory and Practice
               of Software, {ETAPS} 2001, Genova, Italy, April 2--6,
               2001: proceedings},
  series    = {Lecture Notes in Computer Science},
  volume    = {2028},
  publisher = {Springer-Verlag Inc.},
  address   = {New York, NY, USA},
  pages     = {xiii + 431},
  year      = {2001},
  isbn      = {3-540-41862-8 (paperback)},
  lccn      = {QA76.6 .E976 2001; QA267.A1 L43 no.2028},
  bibdate   = {Thu Jan 17 11:49:19 MST 2002},
  keywords  = {computer programming -- congresses; programming
               languages (electronic computers) -- congresses}
}

@inproceedings{Agat:Sands:Confidentiality,
author = {J. Agat and David Sands},
title = {On Confidentiality and Algorithms},
pages = {64--77},
editor = {Francis M. Titsworth},
booktitle = {Proceedings of the 2001 {IEEE} Symposium on Security
and Privacy ({S}\&{P}-01)},
month = may,
publisher = {IEEE Computer Society},
abstract = {Recent interest in methods for certifying programs for secure
information flow (noninterference)
have failed to raise a key question: can efficient algorithms
be written so as to satisfy the requirements of secure information flow?
In this paper we discuss how algorithms for searching and sorting can be
adapted to work on collections of secret data without leaking any
confidential information, either directly, indirectly,
or through timing behaviour.
We pay particular attention to the issue of timing channels caused by cache
behaviour, and argue that it is necessary to
disable the effect of the cache in order to
construct algorithms manipulating pointers to objects
in such a way that they satisfy the conditions of noninterference.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/agat-sands-SP01.pdf},
year = {2001}
}

@inproceedings{Gustavsson:Sands:Possibilities,
author = {J{\"o}rgen Gustavsson and David Sands},
title = {Possibilities and Limitations of Call-by-Need Space
Improvement},
pages = {265--276},
month = sep,
year = {2001},
booktitle = {Proceedings of the Sixth {ACM} {SIGPLAN}
International Conference on Functional Programming
(ICFP'01)},
publisher = {ACM Press},
abstract = {
Innocent-looking program transformations can easily change the space
complexity of lazy functional programs. The theory of \emph{space
improvement} seeks to characterise those local program
transformations which are guaranteed never to worsen asymptotic space
complexity of any program.  Previous work by the authors introduced the
space improvement relation and showed that a number of simple local
transformation laws are indeed space improvements. This paper seeks an
answer to the following questions: is the improvement relation inhabited
by interesting program transformations, and, if so, how might they be
established? We show that the asymptotic space improvement relation is
semantically badly behaved, but that the theory of \emph{strong space
improvement} possesses a fixed-point induction theorem which permits
the derivation of improvement properties for recursive definitions.
With the help of this tool we explore the landscape of space improvement by
considering a range of classical program transformations. },
pdf = {http://www.cse.chalmers.se/~dave/papers/PossibilitiesICFP01.pdf}
}

@inproceedings{Sabelfeld:Sands:Probabilistic,
author = {Andrei Sabelfeld and David Sands},
title = {Probabilistic Noninterference for Multi-threaded Programs},
booktitle = {Proceedings of the 13th IEEE Computer Security
Foundations Workshop},
pages = {200--214},
year = {2000},
publisher = {IEEE Computer Society Press},
month = jul,
pdf = {http://www.cse.chalmers.se/~dave/papers/prob-sabelfeld-sands.pdf},
internal-note = {NOTE(review): abstract appears to begin mid-sentence
("security threat.") -- opening text likely lost; verify against the paper},
abstract = {
security threat.  Does the program keep your secrets secret? To
answer the question one must specify what it means for a program to
be secure, and one may wish to verify the security specification
before executing the program.  We present a probability-sensitive
security specification (probabilistic noninterference) for
multi-threaded programs based on a probabilistic
bisimulation.  Some previous approaches to specifying
confidentiality rely on a particular scheduler for executing program
threads. This is unfortunate since scheduling policy is typically
outside the language specification for multi-threaded languages. We
describe how to generalise noninterference in order to define robust
security with respect to any particular scheduler used and show, for
a simple imperative language with dynamic thread creation, how the
security condition satisfies compositionality properties which
facilitates a straightforward proof of correctness of e.g. security
type systems.}
}

@incollection{Moran:Sands:Carlsson:Erratic,
author = {A. K. Moran and David Sands and M. Carlsson},
title = {Erratic {F}udgets: {A} semantic theory for an embedded
coordination language},
booktitle = {the Third International Conference on
Coordination Languages and Models; {COORDINATION}'99},
series = {Lecture Notes in Computer Science},
number = {1594},
pages = {85--102},
publisher = {Springer-Verlag},
year = {1999},
month = apr,
note = {Extended version available: \cite{MSC-SCP}}
}

@article{Sabelfeld:Sands:Per,
author = {A. Sabelfeld and David Sands},
title = {A Per Model of Secure Information Flow in Sequential Programs},
note = {Extended version of \cite{Sabelfeld:Sands:ESOP99}},
journal = {Higher-Order and Symbolic Computation},
volume = {14},
number = {1},
pages = {59--91},
month = mar,
year = {2001},
pdf = {http://www.cse.chalmers.se/~dave/papers/per-sabelfeld-sands.pdf},
abstract = {  This paper proposes an extensional semantics-based formal
specification of secure information-flow properties in sequential
programs based on representing degrees of security by partial
equivalence relations (pers).  The specification clarifies and
unifies a number of specific correctness arguments in the
literature, and connections to other forms of program analysis.  The
approach is inspired by (and equivalent to) the use of partial
equivalence relations in specifying binding-time analysis, and is
thus able to specify security properties of higher-order functions
and ``partially confidential data''. We extend the approach to
handle nondeterminism by using powerdomain semantics and show how
\emph{probabilistic security properties} can be formalised by using
probabilistic powerdomain semantics.}
}

@inproceedings{Moran:Sands:Need,
author = {A. K. Moran and David Sands},
title = {Improvement in a Lazy Context: An Operational Theory for
Call-By-Need},
booktitle = {Proc. POPL'99, the 26th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
year = {1999},
month = jan,
publisher = {ACM Press},
pages = {43--56},
pdf = {http://www.cse.chalmers.se/~dave/papers/cbneed-theory.pdf}
}

@inproceedings{Sabelfeld:Sands:ESOP99,
  author    = {A. Sabelfeld and David Sands},
  title     = {A Per Model of Secure Information Flow in Sequential Programs},
  booktitle = {Programming Languages and Systems, 8th European Symposium on Programming, ESOP'99},
  series    = {Lecture Notes in Computer Science},
  volume    = {1576},
  pages     = {40--58},
  year      = {1999},
  publisher = {Springer-Verlag},
  note      = {Extended version in \cite{Sabelfeld:Sands:Per}}
}

@inproceedings{Gustavsson:Sands:Foundation,
author = {J. Gustavsson and David Sands},
title = {A Foundation for Space-Safe Transformations of Call-by-Need Programs},
booktitle = {The Third International Workshop on Higher Order Operational Techniques in Semantics},
year = {1999},
editor = {A. D. Gordon and A. M. Pitts},
volume = {26},
series = {Electronic Notes in Theoretical Computer Science},
publisher = {Elsevier},
pdf = {http://www.cse.chalmers.se/~dave/papers/space.pdf},
url = {http://www.elsevier.nl/locate/entcs/volume26.html},
abstract = {
We introduce a space-improvement relation on programs which guarantees
that whenever M is improved by N, replacement of M by N in a
program can never lead to asymptotically worse space (heap or stack)
behaviour, for a particular model of garbage collection.  This study
takes place in the context of a call-by-need programming language.  For
languages implemented using call-by-need, e.g., Haskell, space behaviour
is notoriously difficult to predict and analyse, and even
innocent-looking equivalences like x + y = y + x can change the
asymptotic space requirements of some programs. Despite this, we
establish a fairly rich collection of improvement laws, with the help of a
context lemma for a finer-grained improvement relation. We briefly
consider an application of the theory; we prove that inlining of
affine-linear bindings (as introduced by a certain class of ``used-once''
type-systems) is work- and space-safe. We also show that certain weaker
type systems for usage do not provide sufficient conditions for
space-safe inlining.
}
}

@inproceedings{Claessen:Sands:Observable,
author = {K. Claessen and David Sands},
title = {Observable Sharing for Functional Circuit Description},
booktitle = {Advances in Computing Science ASIAN'99; 5th Asian Computing Science Conference},
pages = {62--73},
editor = {P.S. Thiagarajan and R. Yap},
volume = {1742},
series = {Lecture Notes in Computer Science},
year = {1999},
publisher = {Springer-Verlag},
note = {Extended Version Available},
pdf = {http://www.cse.chalmers.se/~dave/papers/observable-sharing.pdf},
abstract = {
Pure functional programming languages have been proposed as a vehicle to
describe, simulate and manipulate circuit specifications. We propose an
extension to Haskell to solve a standard problem when manipulating data
types representing circuits in a lazy functional language. The problem is
that circuits are finite graphs -- but viewing them as an algebraic (lazy)
datatype makes them indistinguishable from potentially infinite regular
trees.  However, implementations of Haskell do indeed represent cyclic
structures by graphs. The problem is that the sharing of nodes that creates
such cycles is not observable by any function which traverses such a
structure. In this paper we propose an extension to call-by-need languages
which makes graph sharing observable. The extension is based on
non-updatable reference cells and an equality test (sharing detection) on this
type. We show that this simple and practical extension has well-behaved
semantic properties, which means that many typical source-to-source program
transformations, such as might be performed by a compiler, are still valid
in the presence of this extension.}
}

@unpublished{Moran:Sands:Need-Extended,
author = {A. K. Moran and David Sands},
title = {Improvement in a Lazy Context: An Operational Theory for
Call-By-Need (Extended Version)},
note = {Extended version of \cite{Moran:Sands:Need}},
year = {1998},
month = nov,
pdf = {http://www.cse.chalmers.se/~dave/papers/cbneed-theory-extended.pdf},
abstract = {The standard implementation technique for lazy functional languages is
call-by-need, which ensures that an argument to a function in any  given call
is evaluated at most once. A significant problem with  call-by-need is that it
is difficult - even for compiler writers - to  predict the effects of program
transformations. The traditional  theories for lazy functional languages are
based on call-by-name  models, and offer no help in determining which
transformations do  indeed optimize a program.

In this article we present an operational theory for call-by-need, based upon
an improvement ordering on programs: M is improved by N if in all
program-contexts C, when C[M] terminates then C[N] terminates at least as
cheaply.

We show that this improvement relation satisfies a "context lemma", and
supports a rich inequational theory, subsuming the call-by-need lambda calculi
of Ariola \emph{et al}.  The reduction-based call-by-need calculi are
inadequate as a theory of lazy-program transformation since they only permit
transformations which speed up programs by at most a constant factor (a claim
we substantiate); we go beyond the various reduction-based calculi for
call-by-need by providing powerful proof rules for recursion, including
syntactic continuity - the basis of fixed-point-induction style reasoning, and
an Improvement Theorem, suitable for arguing the safety and correctness of
recursion-based program transformations.
}
}

@misc{Sands:SOS04,
author = {David Sands},
title = {Representing and Manipulating Contexts --
A Tool for Operational Reasoning},
howpublished = {Invited tutorial, SOS'04},
month = aug,
year = {2004},
url = {http://www.cse.chalmers.se/~dave/SOS04/},
abstract = {The notion of a context is widely used in
programming language semantics -- for example in the
definition of operational equivalences, or program
transformation, and in certain styles of operational
semantics definitions. This tutorial describes how
the use of a higher-order syntax representation of
contexts combines smoothly with higher-order syntax
for evaluation rules, so that definitions can be
extended to work over contexts. This provides "for
free" -- without the development of any new
language-specific context calculi -- evaluation
rules for contexts which commute with
hole-filling. We have found this to be a useful
technique for directly reasoning about operational
equivalence. Some illustrations of the utility of
this technique will be given.}
}

@incollection{Sands:Computing,
  author    = {David Sands},
  title     = {Computing with Contexts: {A} simple approach},
  booktitle = {Second Workshop on Higher-Order Operational Techniques in
               Semantics (HOOTS II)},
  editor    = {A. D. Gordon and A. M. Pitts and C. L. Talcott},
  series    = {Electronic Notes in Theoretical Computer Science},
  volume    = {10},
  year      = {1998},
  publisher = {Elsevier Science Publishers B.V.},
  pdf       = {http://www.cse.chalmers.se/~dave/papers/ENTCS10-sands.pdf},
  abstract  = {This article describes how the use of a higher-order syntax
    representation of contexts [due to A. Pitts] combines smoothly with
    higher-order syntax for evaluation rules, so that definitions can be
    extended to work over contexts. This provides "for free" - without the
    development of any new language-specific context calculi - evaluation rules
    for contexts which commute with hole-filling.  We have found this to be a
    useful technique for directly reasoning about operational equivalence. A
    small illustration is given based on a unique fixed-point induction
    principle for a notion of guarded context in a functional language.}
}

@article{TCS::HankinMS1998,
title = {Refining multiset transformers},
author = {Chris Hankin and Daniel Le M{\'e}tayer and David
Sands},
pages = {233--258},
journal = {Theoretical Computer Science},
year = {1998},
volume = {192},
number = {2},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hankin-LeMetayer-Sands-TCS.pdf},
abstract = {
Gamma is a minimal language based on local multiset rewriting with an
elegant chemical reaction metaphor. The virtues of this paradigm in terms
of systematic program construction and design of parallel programs have
been argued in previous papers. Gamma can also be seen as a notation for
coordinating independent programs in a larger application. In this paper,
we study a notion of refinement for programs involving parallel and
sequential composition operators, and derive a number of programming
laws. The calculus thus obtained is applied in the development of a generic
"pipelining" transformation, which enables certain sequential compositions
to be refined into parallel compositions.
}
}

@inproceedings{Sands:Weichert:From,
author = {David Sands and M. Weichert},
title = {From {G}amma to {CBS}:
Refining multiset transformations with broadcasting processes},
month = jan,
year = {1998},
booktitle = {Proc. of the 31st Hawaii Intl. Conf. on System Sciences},
volume = {VII Software Technology Track},
pages = {265--274},
editor = {H. El-Rewini},
organization = {{IEEE} Computer Soc.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Weichert-Sands-HICSS98.pdf}
}

@incollection{Sands:HOOTS,
author = {David Sands},
title = {Improvement Theory and Its Applications},
pages = {275--306},
editor = {A. D. Gordon and A. M. Pitts},
booktitle = {Higher {O}rder {O}perational {T}echniques in {S}emantics},
publisher = {Cambridge University Press},
series = {Publications of the Newton Institute},
year = {1998},
abstract = {An improvement theory is a variant of the standard
theories of observational approximation (or equivalence) in which the basic
observations made of a functional program's execution include some
intensional information about, for example, the program's computational cost.
One program is an improvement of another if its execution is more efficient
in any program context. In this article we give an overview of our work on
the theory and applications of improvement. Applications include reasoning
about time properties of functional programs, and proving the correctness of
program transformation methods. We also introduce a new application, in the
form of some bisimulation-like proof techniques for equivalence, with
something of the flavour of Sangiorgi's ``bisimulation up-to expansion and
context''.},
pdf = {http://www.cse.chalmers.se/~dave/papers/hoots97.pdf}
}

@inproceedings{Sands:POPL97,
author = {David Sands},
title = {From {SOS} Rules to Proof Principles: An Operational
Metatheory for Functional Languages},
booktitle = {Proceedings of
the 24th Annual ACM SIGPLAN-SIGACT Symposium on
Principles of Programming Languages (POPL)},
month = jan,
year = {1997},
publisher = {ACM Press},
abstract = {Structural Operational Semantics (SOS) is a widely used
formalism for specifying the computational meaning of programs, and is
commonly used in specifying the semantics of functional languages. Despite
this widespread use there has been relatively little work on the
``metatheory'' for such semantics. As a consequence the operational approach
to reasoning is considered \emph{ad hoc} since the same basic proof
techniques and reasoning tools are reestablished over and over, once for each
operational semantics specification. This paper develops some metatheory for
a certain class of SOS language specifications for functional languages. We
define a rule format, Globally Deterministic SOS (GDSOS), and establish some
proof principles for reasoning about equivalence which are sound for all
languages which can be expressed in this format. More specifically, if the
SOS rules for the operators of a language conform to the syntax of the GDSOS
format, then (i) a syntactic analogy of continuity holds, which relates a
recursive function to its finite unwindings, and forms the basis of a
Scott-style fixed-point induction technique; (ii) a powerful induction
principle called improvement induction holds for a certain class of
instrumented GDSOS semantics; (iii) a useful bisimulation-based coinductive
proof technique for operational approximation (and its ``instrumented''
variants) is sound.},
pdf = {http://www.cse.chalmers.se/~dave/papers/sands-POPL97.pdf}
}

@article{Sands:TCS,
author = {David Sands},
title = {Proving the
Correctness of Recursion-Based Automatic Program
Transformations},
journal = {Theoretical Computer Science},
volume = {167},
number = {10},
internal-note = {NOTE(review): number = 10 looks suspect for TCS volume 167
(and pages are missing) -- verify against the published issue},
month = oct,
year = {1996},
note = {Preliminary version in TAPSOFT'95, LNCS 915},
abstract = {This paper shows how the {\em Improvement Theorem}---a semantic
condition for establishing the total correctness of
program transformation on higher-order functional programs---has practical
value in proving the correctness of automatic techniques.
To this end we develop and study a
family of automatic program transformations. The root of this  family
is  a well-known and widely studied
transformation called {\em deforestation}; descendants include
generalisations to
richer input languages (e.g.\ higher-order functions),
and more powerful transformations, including a source-level representation of
some of the techniques known from Turchin's {\em supercompiler}.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/sands-TCS96.pdf}
}

@article{Sands:TOPLAS,
author = {David Sands},
title = {Total Correctness by Local Improvement in the Transformation
of Functional Programs},
journal = {ACM Transactions on Programming Languages and Systems
(TOPLAS)},
year = {1996},
volume = {18},
number = {2},
month = mar,
pages = {175--234},
note = {Extended version of \cite{Sands:POPL}},
internal-note = {NOTE(review): summary text appears garbled near "does not
necessarily condition" -- text likely lost; verify against published abstract},
summary = {The goal of program transformation is to
improve efficiency while preserving meaning. One of the
best-known transformation techniques is Burstall and
Darlington's unfold-fold method.  Unfortunately the
unfold-fold method itself guarantees neither improvement
in efficiency nor total correctness.  The correctness
problem for unfold-fold is an instance of a strictly more
general problem: transformation by locally
equivalence-preserving steps does not necessarily
condition for the total correctness of transformations on
recursive programs, which, for the first time, deals with
higher-order functional languages (both strict and
nonstrict) including lazy data structures.  The main
technical result is an improvement theorem which
says that if the local transformation steps are guided by
certain optimization concerns (a fairly natural condition
for a transformation), then correctness of the
transformation follows.  The improvement theorem makes
essential use of a formalized improvement theory; as a
rather pleasing corollary it also guarantees that the
transformed program is a formal improvement over the
original.  The theorem has immediate practical
consequences: it is a powerful tool for proving the
correctness of existing transformation methods for
higher-order functional programs, without having to
ignore crucial factors such as memoization or
folding, and it yields a simple syntactic method
for guiding and constraining the unfold-fold method in
the general case so that total correctness (and
improvement) is always guaranteed.  },
pdf = {http://www.cse.chalmers.se/~dave/papers/sands-TOPLAS96.pdf},
optannote = {}
}

@incollection{Sands:Composed,
author = {David Sands},
title = {Composed Reduction Systems},
pages = {360--377},
booktitle = {Coordination
Programming: Mechanisms, Models and Semantics},
editor = {J-M Andreoli and C. Hankin and D. Le~{M\'etayer}},
year = {1996},
publisher = {IC Press, World Scientific},
abstract = {Composed reduction systems are
systems of programs built up from the reduction relations of some reduction
system, by means of parallel and sequential composition operators. The
Calculus of Gamma Programs previously studied by Hankin et al are an instance
of these systems in the case where the reduction relations are a certain
class of multi-set rewriting relations. The trace-based compositional
semantics of composed reduction systems is considered, and a new
graph-representation is introduced as an intermediate program form which
seems well-suited to the study of compositional semantics and program
logics.},
pdf = {http://www.cse.chalmers.se/~dave/papers/red2.pdf}
}

@incollection{Sands:Syntactic,
  author    = {David Sands},
  title     = {Syntactic Continuity from Structural Operational Semantics},
  booktitle = {Theory and Formal Methods 1996: Proceedings
               of the Third Imperial College Workshop on Theory and Formal Methods},
  editor    = {G. McCusker and A. Edalat and S. Jourdan},
  publisher = {IC Press},
  year      = {1996},
  note      = {(subsumed by \cite{Sands:POPL97})}
}

@article{Sands:JLC,
author = {David Sands},
title = {A Na\"{\i}ve Time Analysis and its
Theory of Cost Equivalence},
journal = {Journal of Logic and Computation},
year = {1995},
volume = {5},
number = {4},
pages = {495--541},
optpostscript = {},
abstract = {Techniques for reasoning about extensional properties of
functional programs are well-understood, but methods for analysing the
underlying intensional, or operational properties have been much neglected.
This paper begins with the development of a simple but useful
calculus for time analysis of non-strict functional programs with lazy
lists.  One limitation of this basic calculus is that the ordinary
equational reasoning on functional programs is not valid. In order to buy
back some of these equational properties we develop a non-standard
operational equivalence relation called {\em cost equivalence}, by
considering the number of computation steps as an ``observable'' component
of the evaluation process.  We define this relation by analogy with Park's
definition of bisimulation in {\sc ccs}. This formulation allows us to show
that cost equivalence is a contextual congruence (and thus is substitutive
with respect to the basic calculus) and provides useful proof techniques
for establishing cost-equivalence laws.

It is shown that basic evaluation time can be derived
by demonstrating a certain form of cost equivalence, and we give an
axiomatisation of cost equivalence which is complete with respect to
this application. This shows that cost equivalence subsumes the basic
calculus.

Finally we show how a new operational interpretation of evaluation
demands can be used to provide a smooth interface between this time
analysis and more compositional approaches, retaining the advantages
of both.},
pdf = {http://www.cse.chalmers.se/~dave/papers/JLC-FINAL-VERSION.pdf}
}

@inproceedings{Henglein:Sands:PLILP95,
author = {Henglein, Fritz and Sands, David},
year = {1995},
title = {A Semantic Model of Binding Times for Safe Partial
Evaluation},
booktitle = {Programming Languages: Implementations, Logics and
Programs (PLILP'95)},
editor = {Swierstra, S. D. and Hermenegildo, M.},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {982},
pages = {299--320},
keywords = {binding time, ideal model, partial evaluation,
safety, topped domains, monovariance},
summary = {We develop a semantic model of binding times that is
sufficient to guarantee {\em safety} of a large class of partial evaluators.
In particular, we
\begin{itemize}
\item identify problems of existing models of binding-time properties
based on projections and partial equivalence relations (PER{}s),
which imply that they are not adequate to prove the safety of
simple off-line partial evaluators;
\item
propose a new model for binding times that avoids the potential pitfalls of
projections (PER{}s);
\item
specify binding-time annotations justified by a ``collecting'' semantics,
and clarify the connection between extensional properties (local analysis)
and program annotations (global analysis) necessary to support binding-time
analysis;
\item
prove the safety of a simple but liberal class of
monovariant partial evaluators for a higher-order functional language
with recursive types, based on annotations justified by the model.
\end{itemize}
This is the extended version of the PLILP conference article, including all
relevant proofs.},
pdf = {http://www.cse.chalmers.se/~dave/papers/henglein-sands.pdf}
}

@inproceedings{Sands:TAPSOFT,
author = {David Sands},
title = {Correctness of Recursion-Based Automatic Program
Transformations},
booktitle = {International Joint Conference on Theory and Practice of
Software Development ({TAPSOFT/FASE} '95)},
series = {LNCS},
number = {915},
publisher = {Springer-Verlag},
year = {1995},
note = {Full version in \cite{Sands:TCS}}
}

@inproceedings{Sands:POPL95,
author = {David Sands},
title = {Total Correctness by Local Improvement in Program
Transformation},
booktitle = {Proceedings of
the 22nd Annual ACM SIGPLAN-SIGACT Symposium on
Principles of Programming Languages (POPL)},
year = {1995},
publisher = {ACM Press},
month = jan,
note = {Extended version in \cite{Sands:TOPLAS}}
}

@inproceedings{Sands:Higher,
author = {David Sands},
title = {Higher-Order Expression Procedures},
booktitle = {Proceedings of the ACM SIGPLAN Symposium on Partial
Evaluation
and Semantics-Based Program Manipulation, PEPM'95},
year = {1995},
publisher = {ACM},
pages = {190--201},
abstract = {We investigate the soundness of a specialisation technique due to Scherlis, expression procedures, in the context of a higher-order
non-strict language. An expression procedure is a generalised procedure construct which provides a means of expressing contextual properties of
functions, and thereby facilitates the manipulation and contextual specialisation of programs. Generalised programs provide a route to the correct
specialisation of recursive functions, and are transformed by means of three basic transformation rules: composition, application and abstraction. In this
paper we show that the expression procedure approach is correct for call-by-name evaluation, including lazy data structures and higher order
functions.
},
url = {http://www.diku.dk/research-groups/topps/bibliography/1995.html#D-223},
pdf = {http://www.cse.chalmers.se/~dave/papers/ep2.pdf}
}

@inproceedings{Sands:Towards,
author = {David Sands},
title = {Towards Operational Semantics of Contexts in Functional Languages},
booktitle = {Proceedings of the 6th Nordic Workshop on Programming
Theory},
series = {{BRICS} Notes Series},
number = {NS-94-6},
pages = {378--385},
year = {1994},
abstract = {We consider operational semantics of contexts (terms with
holes) in the setting of lazy functional languages, with the aim of
providing a balance between operational and compositional reasoning, and a
framework for semantics-based program analysis and manipulation.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Sands-NWP94.pdf}
}

@inproceedings{Sands:TFM,
semno = {D-172},
author = {David Sands},
title = {Laws of Parallel Synchronised Termination},
booktitle = {Theory and Formal Methods 1993:
Proceedings of the First Imperial College,
Department of Computing,
Workshop on Theory and Formal Methods},
editor = {G.L. Burn and S.J. Gay and M.D. Ryan},
month = mar,
year = {1993},
address = {Isle of Thorns,  UK},
publisher = {Springer-Verlag},
series = {Workshops in Computing Series},
abstract = {The salient feature of the composition operators for
Gamma programs is that for termination, the parallel composition operator
demands that its operands must terminate synchronously.  This paper studies
the inequational partial correctness properties of the combination of
sequential and parallel composition operators for Gamma programs, provable
from a particular compositional semantics (Brookes-style transition traces)
and shows that the ``residual program'' input-output laws originally
described by Hankin {\em et al.\/} are also verified by the model.}
}

@inproceedings{Sands:93:AParallelProgrammingStyle,
author = {C. Hankin and D. Le M\'{e}tayer and David Sands},
title = {A Parallel Programming Style and its Algebra
of Programs},
booktitle = {Proceedings of Parallel Architectures and Languages
Europe ({PARLE})},
semno = {D-165},
year = {1993},
editor = {A. Bode and M. Reeve and G. Wolf},
series = {Lecture Notes in Computer Science},
volume = {694},
pages = {367--378},
publisher = {Springer-Verlag},
keywords = {Parallel Programming Languages: Language Constructs,
Semantics.},
abstract = {We present a set of primitive program schemes,
which together with just two
basic combining forms provide a surprisingly expressive parallel programming
language.  The primitive program schemes (called {\em tropes}) take the
form of parameterised conditional rewrite rules, and the computational
model is a variant of the {\em Gamma}\/ style, in which computation
proceeds by nondeterministic local rewriting of a global multiset.

We consider a number of examples which illustrate the use of tropes and we
study the algebraic properties of the sequential and parallel combining
forms.  Using the examples we illustrate the application of these
properties in the verification of some simple program transformations.},
pdf = {http://www.cse.chalmers.se/~dave/papers/Sands-PARLE93.pdf}
}

@inproceedings{Sands:FMP93,
semno = {D-171},
author = {David Sands},
title = {A Compositional Semantics of Combining Forms
for {G}amma Programs},
booktitle = {Formal Methods in
Programming and Their Applications, International
Conference, Academgorodok, Novosibirsk, Russia, June/July 1993.},
editor = {D. Bj{\"o}rner and M. Broy and I. Pottosin},
year = {1993},
pages = {43--56},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
summary = {The Gamma model is a minimal programming language based on local
multiset rewriting (with an elegant chemical reaction metaphor);
Hankin {\em et al\/} derived a calculus of Gamma programs built
from basic reactions and two composition operators, and
applied it  to the study of relationships between parallel and
sequential program composition, and related program transformations.
The main shortcoming of the ``calculus of Gamma programs''
is that the refinement and
equivalence laws described are not compositional, so that a refinement
of a sub-program does not necessarily imply a refinement of the
program.

In this paper we  address this problem by
defining  a compositional (denotational) semantics for
Gamma, based on the {\em transition trace\/} method of Brookes,
and by showing how this can be used to verify substitutive
refinement laws, potentially widening the applicability and scalability
of program transformations previously described.

The compositional  semantics is also useful in the study of
relationships between alternative combining forms at a deeper semantic
level. We consider the semantics and properties of
a number of new combining forms for the Gamma
model.
}
}

@techreport{Hankin:Metayer:Sands:Calculus,
author = {C. Hankin and D. Le M\'{e}tayer and David Sands},
title = {A Calculus of {G}amma Programs},
institution = {INRIA, Rennes},
month = oct,
year = {1992},
pages = {31},
type = {INRIA Research Report},
number = {1758},
note = {(Also: Imperial College Technical Report DOC 92/22)}
}

@inproceedings{Hankin:Metayer:Sands:CalculusP,
author = {C. Hankin and D. Le M\'{e}tayer and David Sands},
title = {A Calculus of {G}amma Programs},
editor = {U. Banerjee and D. Gelernter and A. Nicolau and D. Padua},
booktitle = {Languages and Compilers
for Parallel Computing, 5th International Workshop},
series = {Lecture Notes in Computer Science},
number = {757},
pages = {342--355},
publisher = {Springer-Verlag},
month = aug,
year = {1992}
}

@inproceedings{Sands:FSTandTCS,
author = {David Sands},
title = {Time Analysis, Cost Equivalence and Program Refinement},
booktitle = {Proceedings of the Eleventh Conference on Foundations
of Software Technology and Theoretical Computer Science},
year = {1991},
pages = {25--39},
publisher = {Springer-Verlag},
number = {560},
series = {Lecture Notes in Computer Science},
month = dec,
note = {See \cite{Sands:JLC} for a much extended and revised version.}
}

@inproceedings{Hunt:Sands:PEPM91,
author = {S. Hunt  and David Sands},
title = {{Binding Time Analysis}: {A New PERspective}},
booktitle = {Proceedings of the {ACM}
Symposium on Partial Evaluation and Semantics-Based Program Manipulation
(PEPM'91)},
year = {1991},
month = sep,
pages = {154--164},
note = {ACM SIGPLAN Notices 26(9)},
abstract = {Given a description of the parameters in a program that
will be known at
partial evaluation time, a {binding time analysis} must  determine
which parts of the program are dependent solely on these known parts
(and therefore also known at partial evaluation time).
In this paper a
{binding time analysis} for the {simply typed lambda calculus} is
presented. The analysis takes the form of an {abstract interpretation}
and uses a novel formalisation of the problem of binding time analysis,
based on the use of {partial equivalence relations}. A simple
proof of correctness is achieved by the use of {logical relations}.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/Hunt:Sands:PEPM91.pdf}
}

@inproceedings{Sands:Skye,
author = {David Sands},
title = {Operational Theories of Improvement in Functional Languages
(Extended Abstract)},
booktitle = {Proceedings of the Fourth {G}lasgow Workshop on
Functional Programming},
year = {1991},
series = {Workshops in Computing Series},
publisher = {Springer-Verlag},
pages = {298--311},
month = aug,
abstract = {In this paper we address the technical foundations essential to the
aim of providing a semantic basis for the formal treatment of relative
efficiency in functional languages.  For a general class of
``functional'' computation systems, we define a family of improvement
preorderings which express, in a variety of ways, when one expression
is more efficient than another. The main results of this paper build
on Howe's study of equality in lazy computation systems, and are
concerned with the question of when a given improvement relation is
subject to the usual forms of (in)equational reasoning (so that, for
example, we can improve an expression by improving any
sub-expression).  For a general class of computation systems we
establish conditions on the operators of the language which guarantee
that an improvement relation is a precongruence.  In addition, for a
particular higher-order nonstrict functional language, we show that
any improvement relation which satisfies a simple monotonicity
condition with respect to the rules of the operational semantics has
the desired congruence property.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/Sands:Skye.pdf}
}

@phdthesis{Sands:PhDthesis,
author = {David Sands},
title = {Calculi for Time Analysis of Functional Programs},
school = {Department of Computing, Imperial College},
year = {1990},
month = sep,
abstract = {Techniques for reasoning about extensional properties of
functional programs are well-understood, but methods for analysing  the
underlying intensional, or operational properties have been much neglected.
This thesis presents the  development of several calculi for time analysis
of functional programs.

We focus on two features, higher-order functions and lazy evaluation, which
contribute much to the expressive power and semantic elegance of
functional languages, but serve to make operational properties more
opaque.

Analysing higher-order functions is problematic because complexity is
dependent not only on the cost of computing, but also on the cost of {\em
applying}\ function-valued expressions.  Techniques for statically deriving
programs which compute time-cost in the presence of arbitrary higher-order
functions are developed. The key to this process is the introduction of
syntactic structures called {\em cost-closures}, which enable intensional
properties to be carried by functions. The approach is formalised by the
construction of an appropriate cost-model, against which the correctness of
the derivation is proved. A specific factorisation tactic for reasoning
about higher-order functions out of context is illustrated.

Reasoning about lazy evaluation (ie call-by-name, or more usually,
call-by-need) is problematic because the cost of evaluating an expression
cannot be understood simply from the costs of its sub-expressions.  A
direct calculus for reasoning about a call-by-name language with lazy lists
is derived from a simple operational model. In order to extend this
calculus with a restricted form of equational reasoning, a nonstandard
notion of operational approximation called {\em cost-simulation} is
developed, by analogy with {\em (bi)simulation} in CCS.

The problem with calculi of the above form, based directly on an
operational model, is that they do not yield a {\em compositional}
description of cost, and cannot model {\em lazy evaluation}
(graph-reduction) easily. We show how a description of the {\em context} in
which a function is evaluated can be used to parameterise two types of
time-equation: {\em sufficient-time} equations and {\em necessary-time}
equations, which together provide bounds on the exact time-cost of lazy
evaluation.  This approach is extended to higher-order functions using a
modification of the cost-closure technique.},
pdf = {http://www.cse.chalmers.se/~dave/papers/PhDthesis.pdf}
}

@inproceedings{Sands:ESOP90,
author = {David Sands},
title = {Complexity Analysis for a Lazy Higher-Order Language},
booktitle = {Proceedings of the Third European Symposium on Programming},
publisher = {Springer-Verlag},
year = {1990},
month = may,
pages = {361--376},
number = {432},
series = {LNCS},
abstract = {This paper is concerned with the time-analysis of functional programs.
Techniques which enable us to reason formally about a program's execution
costs are useful in the study of functional programming. We concentrate here on the
construction of
equations which compute the time-complexity of expressions
in a  lazy higher-order language.

The problem with higher-order functions is that complexity is dependent on
the cost of applying functional parameters. Structures called {\em
cost-closures} are introduced to allow us to model both functional
parameters {\em and} the cost of their application.

The problem with laziness  is that complexity is dependent on {\em context}. Projections are used to characterise the context in
which an expression is evaluated, and cost-equations are parameterised by this
context-description to give a compositional time-analysis.
Using  this form of context information  we   introduce two types
of time-equation:
{\em sufficient-time} equations  and {\em necessary-time} equations,
which together provide bounds on the exact time-complexity.
},
pdf = {http://www.cse.chalmers.se/~dave/papers/Sands:ESOP90.pdf}
}

@inproceedings{Sands:GFPW,
author = {David Sands},
title = {Complexity Analysis for a Lazy Higher-Order Language},
booktitle = {Proceedings of the Glasgow Workshop on Functional Programming},
publisher = {Springer-Verlag},
pages = {56--79},
series = {Workshops in Computing Series},
month = aug,
year = {1989},
note = {Extended Version of \cite{Sands:ESOP90}}
}

@techreport{Sands:88,
author = {David Sands},
title = {Complexity Analysis for a Higher Order Language},
institution = {Imperial College},
number = {DOC 88/14},
month = oct,
abstract = {The aim of program transformation is to increase efficiency whilst
preserving meaning. Techniques which enable us to reason formally
about a program's execution costs are therefore potentially useful
for assisting the program transformation process.
This paper is concerned with the time-analysis of functional programs.
We show how techniques for deriving equations which compute the
time-complexity of a functional program can be extended to
handle arbitrary higher-order functions.
The derivation  yields a functional program, is  mechanisable, and thus forms
the basis for an analysis tool for higher-order  languages.},
year = {1988}
}

Mon Oct 10 18:41:56 CEST 2011

The articles presented above are made available to ensure timely dissemination of scholarly and technical work. Copyright and all rights therein are retained by authors or by other copyright holders. All persons copying this information are expected to adhere to the terms and constraints of the respective copyright holder. In most cases, these works may not be reposted without the explicit permission of the copyright holder.