@string{brics = "{BRICS}"}
@string{daimi = "Department of Computer Science, University of Aarhus"}
@string{iesd = "Department of Computer Science, Institute
of Electronic Systems, Aalborg University"}
@string{rs = "Research Series"}
@string{ns = "Notes Series"}
@string{ls = "Lecture Series"}
@string{ds = "Dissertation Series"}
@techreport{BRICS-DS-03-14,
author = "Varacca, Daniele",
title = "Probability, Nondeterminism and Concurrency:
Two Denotational Models for Probabilistic
Computation",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-14",
address = daimi,
month = nov,
note = "PhD thesis. xii+163~pp",
comment = "Defence: November~24, 2003 ",
abstract = "Nondeterminism is modelled in domain theory by
the notion of a powerdomain, while probability
is modelled by that of the probabilistic
powerdomain. Some problems arise when we want
to combine them in order to model computation
in which both nondeterminism and probability
are present. In particular there is no
categorical {\em distributive law} between
them. We introduce the {\em powerdomain of
indexed valuations} which modifies the usual
probabilistic powerdomain to take more detailed
account of where probabilistic choices are
made. We show the existence of a distributive
law between the powerdomain of indexed
valuations and the nondeterministic
powerdomain. By means of an equational theory
we give an alternative characterisation of
indexed valuations and the distributive law. We
study the relation between valuations and
indexed valuations. Finally we use indexed
valuations to give a semantics to a programming
language. This semantics reveals the
computational intuition lying behind the
mathematics.\bibpar
In the second part of the thesis we provide an
operational reading of continuous valuations on
certain domains (the distributive concrete
domains of Kahn and Plotkin) through the model
of {\em probabilistic event structures}. Event
structures are a model for concurrent
computation that account for causal relations
between events. We propose a way of adding
probabilities to confusion free event
structures, defining the notion of
probabilistic event structure. This leads to
various ideas of a run for probabilistic event
structures. We show a confluence theorem for
such runs. Configurations of a confusion free
event structure form a distributive concrete
domain. We give a representation theorem which
characterises completely the powerdomain of
valuations of such concrete domains in terms of
probabilistic event structures.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
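A small illustration of the distinction drawn above (a hypothetical Python sketch, not notation or code from the thesis): a finite valuation maps each outcome to its total probability, merging distinct probabilistic choices that produce the same outcome, whereas an indexed valuation keeps one weighted entry per choice point and so records where each choice was made.

    from collections import defaultdict

    # A finite valuation: outcome -> total probability; distinct choices
    # that happen to produce the same outcome are merged together.
    def valuation(indexed_choices):
        v = defaultdict(float)
        for _index, prob, outcome in indexed_choices:
            v[outcome] += prob
        return dict(v)

    # A finite indexed valuation keeps the index of each probabilistic
    # choice, so the two coin flips below remain distinguishable.
    def indexed_valuation(indexed_choices):
        return {index: (prob, outcome)
                for index, prob, outcome in indexed_choices}

    choices = [("flip-1", 0.5, 1), ("flip-2", 0.5, 1)]
    print(valuation(choices))          # {1: 1.0}: the choices collapse
    print(indexed_valuation(choices))  # both weighted entries survive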
@techreport{BRICS-DS-03-13,
author = "Nygaard, Mikkel",
title = "Domain Theory for Concurrency",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-13",
address = daimi,
month = nov,
note = "PhD thesis. xiii+161~pp",
comment = "Defence: November~21, 2003",
abstract = "Concurrent computation can be given an
abstract mathematical treatment very similar to
that provided for sequential computation by
domain theory and denotational semantics of
Scott and Strachey. \bibpar
A simple domain theory for concurrency is
presented. Based on a categorical model of
linear logic and associated comonads, it
highlights the role of linearity in concurrent
computation. Two choices of comonad yield two
expressive metalanguages for higher-order
processes, both arising from canonical
constructions in the model. Their denotational
semantics are fully abstract with respect to
contextual equivalence. \bibpar
One language, called HOPLA for Higher-Order
Process LAnguage, derives from an exponential
of linear logic. It can be viewed as an
extension of the simply-typed lambda calculus
with CCS-like nondeterministic sum and prefix
operations, in which types express the form of
computation path of which a process is capable.
HOPLA can directly encode calculi like CCS, CCS
with process passing, and mobile ambients with
public names, and it can be given a
straightforward operational semantics
supporting a standard bisimulation congruence.
The denotational and operational semantics are
related with simple proofs of soundness and
adequacy. Full abstraction implies that
contextual equivalence coincides with logical
equivalence for a fragment of Hennessy-Milner
logic, linking up with simulation equivalence.
\bibpar
The other language is called Affine HOPLA and
is based on a weakening comonad that yields a
model of affine-linear logic. This language
adds to HOPLA an interesting tensor operation
at the price of linearity constraints on the
occurrences of variables. The tensor can be
understood as a juxtaposition of independent
processes, and allows Affine HOPLA to encode
processes of the kind found in treatments of
nondeterministic dataflow. \bibpar
The domain theory can be generalised to
presheaf models, providing a more refined
treatment of nondeterministic branching and
supporting notions of bisimulation. The
operational semantics for HOPLA is guided by
the idea that derivations of transitions in the
operational semantics should correspond to
elements of the presheaf denotations. Similar
guidelines lead to an operational semantics for
the first-order fragment of Affine HOPLA. An
extension of the operational semantics to the
full language is based on a stable denotational
semantics which associates to each computation
the minimal input necessary for it. Such a
semantics is provided, based on event
structures; it agrees with the presheaf
semantics at first order and exposes the tensor
operation as a simple parallel composition of
event structures. \bibpar
The categorical model obtained from presheaves
is very rich in structure and points towards
more expressive languages than HOPLA and Affine
HOPLA---in particular concerning extensions to
cover independence models. The thesis concludes
with a discussion of related work towards a
fully fledged domain theory for concurrency.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
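The CCS-like operations mentioned in the abstract can be conveyed by a standard toy transition relation (a generic Python sketch of prefix and nondeterministic sum; it illustrates the borrowed CCS operations, not HOPLA itself or its semantics):

    # Processes as tuples: ("nil",), ("prefix", action, continuation),
    # ("sum", p, q). Standard CCS-style rules: a.P --a--> P, and a sum
    # offers the moves of both branches.
    def transitions(p):
        if p[0] == "prefix":
            _, action, cont = p
            return [(action, cont)]
        if p[0] == "sum":
            _, left, right = p
            return transitions(left) + transitions(right)
        return []  # "nil" performs no action

    proc = ("sum",
            ("prefix", "a", ("prefix", "b", ("nil",))),
            ("prefix", "c", ("nil",)))
    for action, cont in transitions(proc):
        print(action, cont)   # two possible first moves: 'a' or 'c'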
@techreport{BRICS-DS-03-12,
author = "Oliva, Paulo B.",
title = "Proof Mining in Subsystems of Analysis",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-12",
address = daimi,
month = sep,
note = "PhD thesis. xii+198~pp",
comment = "Defence: September~26, 2003",
abstract = "This dissertation studies the use of methods
of proof theory in extracting new information
from proofs in subsystems of classical
analysis. We focus mainly on ineffective
proofs, i.e.\ proofs which make use of
ineffective principles ranging from weak
K{\"o}nig's lemma to full comprehension. The
main contributions of the dissertation can be
divided into four parts:
\begin{enumerate}
\item A discussion of how monotone functional
interpretation provides the right notion of
``numerical implication'' in analysis. We
show among other things that monotone
functional interpretation naturally creates
well-studied moduli when applied to various
classes of statements (e.g.\ uniqueness,
convexity, contractivity, continuity and
monotonicity) and that the interpretation of
implications between those statements
corresponds to translations between the
different moduli.
\item A case study in $L_1$-approximation, in
which we analyze Cheney's proof of Jackson's
theorem, concerning uniqueness of the best
approximation, w.r.t.\ $L_1$-norm, of
continuous functions $f \in C[0, 1]$ by
polynomials of bounded degree. The result of
our analysis provides the first effective
modulus of uniqueness for
$L_1$-approximation. Moreover, using this
modulus we give the first complexity analysis
of the sequence of best $L_1$-approximations
for polynomial-time computable functions $f
\in C[0, 1]$.
\item A comparison between three different
forms of bar recursion, in which we show
among other things that the type structure of
strongly majorizable functionals is a model
of modified bar recursion, that modified bar
recursion is not S1--S9 computable over the
type structure of total continuous functions
and that modified bar recursion defines
(primitive recursively, in the sense of
Kleene) Spector's bar recursion.
\item An adaptation of functional
interpretation to handle ineffective proofs in
feasible analysis, which provides the first
modular procedure for extracting
polynomial-time realizers from ineffective
proofs (i.e.\ proofs involving weak
K{\"o}nig's lemma) of $\prod^0_2$-theorems in
feasible analysis.
\end{enumerate}
",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
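For orientation, the modulus of uniqueness mentioned in item 2 can be stated as follows (the standard formulation from the proof-mining literature, recalled here as background rather than quoted from the thesis): $\Phi$ is a modulus of uniqueness for best $L_1$-approximation of $f$ from the space $P_n$ of polynomials of degree at most $n$ when

    \[
    \forall \varepsilon > 0\; \forall p_1, p_2 \in P_n\,
    \Bigl( \bigwedge_{i=1,2} \|f - p_i\|_1 \le
           \operatorname{dist}_1(f, P_n) + \Phi(\varepsilon)
    \;\rightarrow\; \|p_1 - p_2\|_1 \le \varepsilon \Bigr).
    \]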
@techreport{BRICS-DS-03-11,
author = "Koprowski, Maciej",
title = "Cryptographic Protocols Based on Root
Extracting",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-11",
address = daimi,
month = aug,
note = "PhD thesis. xii+138~pp",
comment = "Defence: August~8, 2003",
abstract = "In this thesis we design new cryptographic
protocols whose security is based on the
hardness of root extracting or, more
specifically, the RSA problem. \bibpar
First we study the problem of root extraction
in finite Abelian groups, where the group order
is unknown. This is a natural generalization of
the RSA problem of decrypting RSA ciphertexts,
which is heavily employed in modern
cryptography. We
examine the complexity of this problem for
generic algorithms, that is, algorithms that
work for any group and do not use any special
properties of the group at hand. We prove an
exponential lower bound on the generic
complexity of root extraction, even if the
algorithm can choose the ``public exponent''
itself. In other words, both the standard and
the strong RSA assumption are provably true
w.r.t.\ generic algorithms. The results hold
for arbitrary groups, so security
w.r.t.\ generic attacks follows for any
cryptographic construction based on root
extracting. \bibpar
As an example of this, we modify the
Cramer-Shoup signature scheme so that it
becomes a generic algorithm. We then discuss
implementing it in
RSA groups without the original restriction
that the modulus must be a product of safe
primes. It can also be implemented in class
groups. In all cases, security follows from a
well-defined complexity assumption (the strong
root assumption), without relying on random
oracles. \bibpar
A {\em smooth} natural number has no big prime
factors. The probability that a random natural
number not greater than $x$ has all prime
factors smaller than $x^{1/u}$, with $u>1$, is
asymptotically close to the Dickman function
$\rho(u)$. By careful analysis of Hildebrand's
asymptotic smoothness results and applying new
techniques, we derive concrete bounds on the
smoothness probabilities assuming the Riemann
hypothesis. We show how our results can be
applied in order to suggest realistic values of
security parameters in Gennaro-Halevi-Rabin
signature scheme. \bibpar
We introduce a modified version of Fischlin's
signature scheme where we substitute prime
exponents with random numbers. Equipped with
our exact smoothness bounds, we can compute
concrete sizes of security parameters,
providing a currently acceptable level of
security. \bibpar
This allows us to propose the first practical
blind signature scheme that is provably secure
without relying on the heuristic known as the
random oracle model (ROM). We obtain the
protocol for issuing blind signatures by
implementing our modified
Fischlin's signing algorithm as a secure
two-party computation. The security of our
scheme assumes the hardness of the strong RSA
problem and decisional composite residuosity.
\bibpar
We discuss a security flaw in the efficient
stateful variant of Fischlin's signature
scheme. \bibpar
Next, we introduce a threshold RSA scheme which
is almost as efficient as the fastest previous
threshold RSA scheme (by Shoup), but where two
assumptions needed in Shoup's and in previous
schemes can be dropped, namely that the modulus
must be a product of safe primes and that a
trusted dealer generates the keys. The
robustness (but not the unforgeability) of our
scheme depends on a new intractability
assumption, in addition to security of the
underlying standard RSA scheme. \bibpar
Finally, we propose the concept of fine-grained
forward-secure signature schemes, which not
only provide non-repudiation w.r.t.\ past time
periods the way ordinary forward-secure
signature schemes do, but in addition allow
the signer to specify which signatures of the
current time period remain valid in case the
public key needs to be revoked. This is an
important advantage if the signer processes
many signatures per time period as otherwise
the signer would have to re-issue those
signatures (and possibly re-negotiate the
respective messages) with a new key. Apart from
a formal model we present practical
instantiations and prove them secure under the
strong RSA assumption only, i.e., we do not
resort to the random oracle model to prove
security. As a side result, we provide an
ordinary forward-secure scheme whose key-update
time is significantly smaller than for other
known schemes that do not assume the random
oracle model (ROM)."
}
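The Dickman function mentioned above admits a simple numerical illustration (a generic sketch based on its standard defining delay differential equation, not an algorithm from the thesis): $\rho(u) = 1$ for $0 \le u \le 1$ and $u\rho'(u) = -\rho(u-1)$ for $u > 1$.

    def dickman_rho(u, h=1e-4):
        """Approximate rho(u) by forward-Euler integration of the delay
        differential equation u * rho'(u) = -rho(u - 1), with rho = 1 on
        [0, 1]. Illustrative accuracy only."""
        steps = int(u / h)
        rho = [1.0] * (steps + 1)   # rho(u) = 1 on [0, 1]
        delay = int(1 / h)          # index offset corresponding to u - 1
        for i in range(delay, steps):
            rho[i + 1] = rho[i] - h * rho[i - delay] / (i * h)
        return rho[steps]

    # rho(2) = 1 - ln 2, about 0.3069: asymptotically, the probability
    # that a random number up to x has all prime factors below sqrt(x).
    print(dickman_rho(2.0))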
@techreport{BRICS-DS-03-10,
author = "Fehr, Serge",
title = "Secure Multi-Player Protocols: Fundamentals,
Generality, and Efficiency ",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-10",
address = daimi,
month = aug,
note = "PhD thesis. xii+125~pp",
comment = "Defence: August~8, 2003 ",
abstract = "While classically cryptography is concerned
with the problem of private communication among
two entities, say {\em players}, in modern
cryptography {\em multi}-player protocols play
an important role. And among these, it is
probably fair to say that {\em secret sharing},
and its stronger version {\em verifiable secret
sharing (VSS)}, as well as {\em multi-party
computation} (MPC) belong to the most appealing
and/or useful ones. The former two are basic
tools to achieve better robustness of
cryptographic schemes against malfunction or
misuse by ``decentralizing'' the security from
one single individual to a whole group
(captured by the term {\em threshold}
cryptography). The latter allows one---at
least in principle---to execute {\em any}
collaboration
among a group of players in a {\em secure} way
that guarantees the correctness of the outcome
but simultaneously respects the privacy of the
participants.\bibpar
In this work, we study three aspects of secret
sharing, VSS and MPC, which we denote by {\em
fundamentals}, {\em generality}, and {\em
efficiency}. By fundamentals we mean the quest
for understanding {\em why} a protocol works
and is secure in abstract (and hopefully
simple) mathematical terms. By generality we
mean generality with respect to the underlying
mathematical structure, in other words,
minimizing the mathematical {\em axioms}
required to do some task. And efficiency of
course deals with the improvement of protocols
with respect to some meaningful complexity
measure. \bibpar
We briefly summarize our main results. (1) We
give a complete characterization of {\em
black-box} secret sharing in terms of simple
algebraic conditions on the integer sharing
coefficients, and we propose a black-box secret
sharing scheme with minimal expansion factor.
Note that, in contrast to the classical
field-based secret sharing schemes, a black-box
secret sharing scheme allows one to share a secret
sampled from an arbitrary Abelian group and
requires only black-box access to the group
operations and to random group elements. Such a
scheme may be very useful in the construction
of threshold cryptosystems based on groups with
secret order (like~RSA). (2) We show that
without loss of efficiency, MPC can be based on
arbitrary finite rings. This is in sharp
contrast to the literature where essentially
all MPC protocols require a much stronger
mathematical structure, namely a field. Apart
from its theoretical value, this can lead to
efficiency improvements since it allows a
greater freedom in the (mathematical)
representation of the task that needs to be
securely executed. (3) We propose a unified
treatment of perfectly secure linear VSS and
distributed commitments (a weaker version of
the former), and we show that the security of
such a scheme can be reduced to a linear
algebra condition. The security of all known
schemes follows as corollaries whose proofs are
pure linear algebra arguments, in contrast to
some hybrid arguments used in the literature.
(4) We construct a new unconditionally secure
VSS scheme with minimized reconstruction
complexity in the setting of a dishonest
minority. This improves on the reconstruction
complexity of the previously best known scheme
without affecting the (asymptotic) complexity
of the sharing phase. In the context of MPC
with {\em pre-processing}, our scheme allows one to
improve the computation phase of earlier
protocols without increasing the complexity of
the pre-processing. (5) Finally, we consider
{\em commitment multiplication proofs}, which
allow one to prove a multiplicative relation among
three commitments, and which play a crucial
role in computationally secure MPC. We study a
{\em non-interactive} solution which works in a
{\em distributed-verifier} setting and
essentially consists of a few executions of
Pedersen's VSS. We simplify and improve the
protocol, and we point out a (previously
overlooked) property which helps to construct
non-interactive proofs of {\em partial
knowledge} in this setting. This allows one,
for instance, to prove knowledge of $\ell$ out
of $m$ given secrets, without revealing which
ones. We also construct a non-interactive
distributed-verifier proof of {\em circuit
satisfiability}, which---in principle---allows
one to prove {\em anything} that can be proven
without giving away the proof.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
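The black-box flavour of result (1) can be conveyed by the simplest possible case (an n-out-of-n additive sketch in Python, my illustration rather than the threshold scheme of the thesis): the dealer touches the group only through its operation, inverses, and uniformly random elements.

    import secrets

    # Z_m stands in for an arbitrary finite Abelian group; the functions
    # use only addition, negation and uniformly random group elements.
    def share(secret, n, m):
        shares = [secrets.randbelow(m) for _ in range(n - 1)]
        shares.append((secret - sum(shares)) % m)  # sum equals the secret
        return shares

    def reconstruct(shares, m):
        return sum(shares) % m

    m, secret = 2 ** 61 - 1, 424242
    assert reconstruct(share(secret, 5, m), m) == secret
    # Any n-1 shares are uniformly distributed, so they reveal nothing.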
@techreport{BRICS-DS-03-9,
author = "Jurik, Mads J.",
title = "Extensions to the Paillier Cryptosystem with
Applications to Cryptological Protocols",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-9",
address = daimi,
month = aug,
note = "PhD thesis. xii+117~pp",
comment = "Defence: August~7, 2003 ",
abstract = "The main contribution of this thesis is a
simplification, a generalization and some
modifications of the homomorphic cryptosystem
proposed by Paillier in 1999, and several
cryptological protocols that follow from these
changes. \bibpar
The Paillier cryptosystem is an additive
homomorphic cryptosystem, meaning that one can
combine ciphertexts into a new ciphertext that
is the encryption of the sum of the messages of
the original ciphertexts. The cryptosystem uses
arithmetic over the group ${\bf Z}_{n^2}^*$ and
the cryptosystem can encrypt messages from the
group ${\bf Z}_n$. In this thesis the
cryptosystem is generalized to work over the
group ${\bf Z}_{n^{s+1}}^*$ for any integer $s
> 0$ with plaintexts from the group ${\bf
Z}_{n^s}$. This has the advantage that the
ciphertext is only a factor of $(s+1) / s$
longer than the plaintext, which is an
improvement over the factor of 2 in the Paillier
cryptosystem. The generalized cryptosystem is
also simplified in some ways, which results in
a threshold decryption that is conceptually
simpler than other proposals. Another
cryptosystem is also proposed that is
length-flexible, i.e. given a fixed public key,
the sender can choose $s$ when the message
is encrypted and use the message space of ${\bf
Z}_{n^s}$. This new system is modified using
some El Gamal elements to create a cryptosystem
that is both length-flexible and has an
efficient threshold decryption. This new system
has the added feature that, with a globally
setup RSA modulus $n$, provers can efficiently
prove various relations on plaintexts inside
ciphertexts made using different public keys.
Using these cryptosystems several multi-party
protocols are proposed:
\begin{itemize}
\item A mix-net, which is a tool for making an
unknown random permutation of a list of
ciphertexts. This makes it a useful tool for
achieving anonymity.
\item Several voting systems:
\begin{itemize}
\item An efficient large scale election
system capable of handling large elections
with many candidates.
\item Client/server trade-offs: 1) a system
where vote size is within a constant of the
minimal size, and 2) a system where a voter
is protected even when voting from a
hostile environment (i.e. a Trojan infested
computer). Both of these improvements are
achieved at the cost of some extra
computations at the server side.
\item A small scale election with perfect
ballot secrecy (i.e. any group of persons
only learns what follows directly from
their votes and the final result) usable
e.g.\ for board room elections.
\end{itemize}
\item A key escrow system, which allows an
observer to decrypt any message sent using
any public key set up in the defined way.
This is achieved even though the servers only
store a constant amount of key material.
\end{itemize}
\noindent The last contribution of this thesis
is a petition system based on the modified Weil
pairing. This system greatly improves on the
naive implementation using ordinary signatures,
reducing the cost from ${\cal O}(tk)$ group
operations to only ${\cal O}(t + k)$,
where $t$ is the number of signatures checked,
and $k$ is the security parameter.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
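A toy sketch of the generalized encryption function (my rendering of the construction as it is usually presented in the literature on this generalization; the parameters below are illustrative and far too small to be secure):

    import math, secrets

    def encrypt(m, n, s):
        """Generalized Paillier encryption: plaintext m in Z_{n^s},
        ciphertext in Z*_{n^(s+1)}, hence the (s+1)/s expansion factor
        noted in the abstract."""
        ns, ns1 = n ** s, n ** (s + 1)
        r = secrets.randbelow(ns1)
        while math.gcd(r, n) != 1:
            r = secrets.randbelow(ns1)
        return (pow(1 + n, m, ns1) * pow(r, ns, ns1)) % ns1

    # Additive homomorphism: multiplying ciphertexts adds plaintexts
    # modulo n^s.
    n, s = 7 * 11, 2              # toy modulus; a real n has ~2048 bits
    c = (encrypt(30, n, s) * encrypt(12, n, s)) % n ** (s + 1)
    # c decrypts to 42 under the matching secret key (decryption omitted).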
@techreport{BRICS-DS-03-8,
author = "Nielsen, Jesper Buus",
title = "On Protocol Security in the Cryptographic
Model ",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-8",
address = daimi,
month = aug,
note = "PhD thesis. xiv+341~pp",
comment = "Defence: August~7, 2003",
abstract = "It seems to be a generally acknowledged fact
that you should never trust a computer and that
you should trust the person operating the
computer even less. This in particular becomes
a problem when the party that you do not trust
is one which is separated from you and is one
on which you depend, e.g. because that party is
the holder of some data or some capability that
you need in order to operate correctly. How do
you perform a given task involving interaction
with other parties while allowing these parties
a minimal influence on you and, if privacy is
an issue, allowing them to learn as little
about you as possible? This is the general
problem of secure {\em multiparty computation}.
The usual way of formalizing the problem is to
say that a number of parties who do not trust
each other wish to compute some function of
their local inputs, while keeping their inputs
as secret as possible and guaranteeing the
correctness of the output. Both goals should be
obtained even if some parties stop
participating or some malevolent coalition of
the parties starts deviating arbitrarily from
the agreed protocol. The task is further
complicated by the fact that, besides their
mutual distrust, the parties also do not trust
the channels by which they communicate. A general
solution to the secure multiparty computation
problem is a compiler which, given any feasible
function, describes an efficient protocol which
allows the parties to compute the function
securely on their local inputs over an open
network.\bibpar
Over the past twenty years the secure
multiparty computation problem has been the
subject of a large body of research, both
research into the models of multiparty
computation and research aimed at realizing
general secure multiparty computation. The main
approach to realizing secure multiparty
computation has been based on verifiable secret
sharing as computation, where each party holds
a secret share of each input and during the
execution computes secret shares of all
intermediate values. This approach allows the
parties to keep all inputs and intermediate
values secret and to pool the shares of the
output values to learn exactly these
values.\bibpar
More recently an approach based on joint
homomorphic encryption was introduced, allowing
for an efficient realization of general
multiparty computation secure against an
eavesdropper. A joint encryption scheme is an
encryption scheme where all parties can
encrypt, but where it takes the participation
of all parties to open an encryption. The
computation then starts by all parties
broadcasting encryptions of their inputs and
progresses through computing encryptions of the
intermediary values using the homomorphic
properties of the encryption scheme. The
encryptions of the outputs can then be jointly
decrypted.\bibpar
In this dissertation we extend this approach by
using threshold homomorphic encryption to
provide full-fledged security against an active
attacker which might control some of the
parties and which might take over parties
during the course of the protocol execution. As
opposed to a joint encryption scheme a
threshold encryption scheme only requires that
a given number of the parties are operating
correctly for decryption to be possible. In
this dissertation we show that threshold
homomorphic encryption allows us to solve the
secure general multiparty computation problem
more efficiently than previous approaches to
the problem.\bibpar
Starting from an open point-to-point network
there is a long way to general secure
multiparty computation. The dissertation
contains contributions at several points along
the way. In particular we investigate how to
realize {\em secure channels}. We also show how
threshold signature schemes can be used to
efficiently realize a {\em broadcast channel}
and how threshold cryptography can be used to
provide the parties of the network with a
source of {\em shared randomness}. All three
tools are well-known to be extremely powerful
resources in a network, and our principal
contribution is to provide efficient
realizations.\bibpar
The dissertation also contains contributions to
the definitional part of the field of
multiparty computation. Most significantly we
augment the recent model of {\em universally
composable security} with a formal notion of
what it means for a protocol to only realize a
given problem under a given {\em restricted
input-output behavior} of the environment. This
e.g. allows us to give the first specification
of a {\em universally composable zero-knowledge
proof of membership} which does not at the same
time require the proof system to be a proof of
knowledge.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
@techreport{BRICS-DS-03-7,
author = "C{\'a}ccamo, Mario Jos{\'e}",
title = "A Formal Calculus for Categories",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-7",
address = daimi,
month = jun,
note = "PhD thesis. xiv+151",
comment = "Defence: June~23, 2003",
abstract = "This dissertation studies the logic
underlying category theory. In particular we
present a formal calculus for reasoning about
universal properties. The aim is to systematise
judgements about {\em functoriality} and {\em
naturality} central to categorical reasoning.
The calculus is based on a language which
extends the typed lambda calculus with new
binders to represent universal constructions.
The types of the language are interpreted as
locally small categories and the expressions
represent functors. \bibpar
The logic supports a syntactic treatment of
universality and duality. Contravariance
requires a definition of universality generous
enough to deal with functors of mixed variance.
{\em Ends} generalise limits to cover these
kinds of functors and moreover provide the
basis for a very convenient algebraic
manipulation of expressions. \bibpar
The equational theory of the lambda calculus is
extended with new rules for the definitions of
universal properties. These judgements express
the existence of natural isomorphisms between
functors. The isomorphisms allow us to
formalise in the calculus results like the
Yoneda lemma and Fubini theorem for ends. The
definitions of limits and ends are given as
{\em representations} for special {\bf
Set}-valued functors where {\bf Set} is the
category of sets and functions. This
establishes the basis for a more calculational
presentation of category theory rather than the
traditional diagrammatic one. \bibpar
The presence of structural rules as primitive
in the calculus together with the rule for
duality gives rise to issues concerning the {\em
coherence} of the system. As every well-typed
expression-in-context has several possible
derivations, it is sensible to ask whether they
result in the same
interpretation. For the functoriality
judgements the system is coherent up to natural
isomorphism. In the case of naturality
judgements a simple example shows its
incoherence. However, in many situations it is
enough to know that a natural isomorphism
exists.
As one of the contributions in this
dissertation, the calculus provides a useful
tool to verify that a functor is continuous by
just establishing the existence of a certain
natural isomorphism. \bibpar
Finally, we investigate how to generalise the
calculus to enriched categories. Here we lose
the ability to manipulate contexts through
weakening and contraction, and conical limits
are no longer adequate.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
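For orientation, the two results mentioned read as follows in the end notation on which the calculus rests (standard formulations, assuming the relevant ends exist; not transcriptions from the dissertation). For $F : \mathbf{C} \to \mathbf{Set}$,

    \[
    F c \;\cong\; \int_{c'} \mathbf{Set}\bigl(\mathbf{C}(c, c'), F c'\bigr)
    \qquad \text{(Yoneda)},
    \]

and for $T : (\mathbf{C} \times \mathbf{D})^{\mathrm{op}} \times (\mathbf{C} \times \mathbf{D}) \to \mathbf{Set}$,

    \[
    \int_{(c,d)} T(c,d;c,d) \;\cong\; \int_{c}\!\int_{d} T(c,d;c,d)
    \;\cong\; \int_{d}\!\int_{c} T(c,d;c,d)
    \qquad \text{(Fubini)}.
    \]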
@techreport{BRICS-DS-03-6,
author = "Ursem, Rasmus K.",
title = "Models for Evolutionary Algorithms and Their
Applications in System Identification and
Control Optimization",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-6",
address = daimi,
month = jun,
note = "PhD thesis. xiv+183~pp",
comment = "Defence: June~20, 2003",
abstract = "In recent years, optimization algorithms have
received increasing attention from the research
community as well as the industry. In the area
of evolutionary computation (EC), inspiration
for optimization algorithms originates in
Darwin's ideas of evolution and survival of the
fittest. Such algorithms simulate an
evolutionary process where the goal is to
evolve solutions by means of crossover,
mutation, and selection based on their quality
(fitness) with respect to the optimization
problem at hand. Evolutionary algorithms (EAs)
are highly relevant for industrial
applications, because they are capable of
handling problems with non-linear constraints,
multiple objectives, and dynamic components ---
properties that frequently appear in real-world
problems. \bibpar
This thesis presents research in three
fundamental areas of EC: fitness function
design, methods for parameter control, and
techniques for multimodal optimization. In
addition to general investigations in these
areas, I introduce a number of algorithms and
demonstrate their potential on real-world
problems in system identification and control.
Furthermore, I investigate dynamic optimization
problems in the context of the three
fundamental areas as well as control, which is
a field where real-world dynamic problems
appear. \bibpar
Regarding fitness function design, smoothness
of the fitness landscape is of primary concern,
because an overly rugged landscape may disrupt the
search and lead to premature convergence at
local optima. Rugged fitness landscapes
typically arise from imprecisions in the
fitness calculation or low relatedness between
neighboring solutions in the search space. The
imprecision problem was investigated on the
Runge-Kutta-Fehlberg numerical integrator in
the context of non-linear differential
equations. Regarding the relatedness problem
for the search space of arithmetic functions,
Thiemo Krink and I suggested the smooth
operator genetic programming algorithm. This
approach improves the smoothness of the fitness
function by allowing a gradual change between
traditional operators such as multiplication
and division. \bibpar
In the area of parameter control, I
investigated the so-called self-adaptation
technique on dynamic problems. In
self-adaptation, the genome of the individual
contains the parameters that are used to modify
the individual. Self-adaptation was developed
for static problems; however, the parameter
control approach requires a significant number
of generations before superior parameters are
evolved. In my study, I experimented with two
artificial dynamic problems and showed that the
technique fails on even rather simple
time-varying problems. In a different study on
static problems, Thiemo Krink and I suggested
the terrain-based patchwork model, which is a
fundamentally new approach to parameter control
based on agents moving in a spatial grid world.
\bibpar
For multimodal optimization problems,
algorithms are typically designed with two
objectives in mind. First, the algorithm shall
find the global optimum and avoid stagnation at
local optima. Additionally, the algorithm shall
preferably find several candidate solutions,
and thereby allow a final human decision among
the found solutions. For this objective, I
created the multinational EA that employs a
self-organizing population structure grouping
the individuals into a number of subpopulations
located in different parts of the search space.
In a related study, I investigated the
robustness of the widely used sharing
technique. Surprisingly, I found that this
algorithm is extremely sensitive to the range
of fitness values. In a third investigation, I
introduced the diversity-guided EA, which uses
a population diversity measure to guide the
search. The potential of this algorithm was
demonstrated on parameter identification of two
induction motor models, which are used in the
pumps produced by the Danish pump manufacturer
Grundfos.",
abstract1 = "\bibpar
The field of dynamic optimization has received
significant attention since 1990. However, most
research performed in an EC-context has focused
on artificial dynamic problems. In a
fundamental study, Thiemo Krink, Mikkel T.
Jensen, Zbigniew Michalewicz, and I
investigated artificial dynamic problems and
found no clear connection (if any) to
real-world dynamic problems. In conclusion, a
large part of this research field's foundation,
i.e., the test problems, is highly
questionable. In continuation of this, Thiemo
Krink, Bogdan Filipi{\v{c}}, and I investigated
online control problems, which have the special
property that the search changes the problem.
In this context, we examined how to utilize the
available computation time in the best way
between updates of the control signals. For an
EA, the available time can be a trade-off
between population size and number of
generations. From our experiments, we concluded
that the best approach was to have a small
population and many generations, which
essentially turns the problem into a series of
related static problems. To our surprise, the
control problem could easily be solved when
optimized like this. To further examine this,
we compared the EA with a particle swarm and a
local search approach, which we developed for
dynamic optimization in general. The three
algorithms had matching performance when
properly tuned. An interesting result from this
investigation was that different step-sizes in
the local search algorithm induced different
control strategies, i.e., the search strategy
led to the emergence of alternative paths of
the moving optima in the dynamic landscape.
This observation is captured in the novel
concept of {\em optima in time}, which we
introduced as a temporal version of the usual
optima in the search space.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
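The diversity-guided idea can be sketched generically (a minimal Python illustration of the principle, with hypothetical thresholds and operators; not the algorithm as specified in the thesis, which uses the measure to alternate between exploiting and exploring phases):

    import random

    def diversity(pop):
        """Average Euclidean distance to the population centroid."""
        dim = len(pop[0])
        centroid = [sum(ind[i] for ind in pop) / len(pop)
                    for i in range(dim)]
        return sum(sum((ind[i] - centroid[i]) ** 2
                       for i in range(dim)) ** 0.5
                   for ind in pop) / len(pop)

    def step(pop, fitness, d_low=0.01, sigma=0.05):
        """One generation: select-and-mutate while diversity is high;
        switch to exploration (larger mutations, no selection pressure)
        once diversity collapses below d_low."""
        exploring = diversity(pop) < d_low
        scale = 10 * sigma if exploring else sigma
        pop = sorted(pop, key=fitness)                 # minimisation
        parents = pop if exploring else pop[: len(pop) // 2] * 2
        return [[g + random.gauss(0, scale) for g in ind]
                for ind in parents]

    pop = [[random.uniform(-5, 5) for _ in range(2)] for _ in range(20)]
    for _ in range(100):
        pop = step(pop, fitness=lambda ind: sum(g * g for g in ind))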
@techreport{BRICS-DS-03-5,
author = "Milicia, Giuseppe",
title = "Applying Formal Methods to Programming
Language Design and Implementation",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-5",
address = daimi,
month = jun,
note = "PhD thesis. xvi+211",
comment = "Defence: June~16, 2003",
abstract = "This thesis concerns the design and evolution
of two programming languages. The two
contributions, unrelated to each other, are
treated separately. The underlying theme of
this work is the application of {\em formal
methods} to the task of programming language
design and implementation.\bibpar
{\bf Jeeg}\bibpar
We introduce Jeeg, a dialect of Java based on a
declarative replacement of the synchronization
mechanisms of Java that results in a complete
decoupling of the ``business'' and the
``synchronization'' code of classes.
Synchronization constraints in Jeeg are
expressed in a linear temporal logic which
allows one to effectively limit the occurrence of
the inheritance anomaly that commonly affects
concurrent object oriented languages. Jeeg is
inspired by the current trend in aspect
oriented languages. In a Jeeg program the
sequential and concurrent aspects of object
behaviors are decoupled: specified separately
by the programmer, they are then woven
together by the Jeeg compiler.\bibpar
$\chi${\bf-Spaces}\bibpar
It is of paramount importance that a security
protocol effectively enforces the desired
security requirements. The apparent simplicity
of informal protocol descriptions hides the
complex interactions of a security protocol
which often invalidate informal correctness
arguments and justify the effort of formal
protocol verification. Verification, however,
is usually carried out on an abstract model not
at all related to a protocol's
implementation. Experience shows that security
breaches introduced in implementations of
successfully verified models are rather common.
The $\chi$-Spaces framework is a tool meant to
support every step of the protocol's life-cycle
in a uniform manner. The core of the framework
is a domain specific programming language based
on SPL (Security Protocol Language), a formal
model for studying security protocols. This
simple, yet powerful, language allows the
implementation of security protocols in a
concise and precise manner. Its underlying
formal model, based on SPL, gives the power to
prove interesting properties of the actual
protocol implementation. Simulation,
benchmarking and testing of the protocols are
immediate using the $\chi$-Sim tool."
}
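The declarative synchronization idea can be conveyed by a small sketch (hypothetical Python illustrating the general principle of past-time temporal guards over an object's event history, not Jeeg's actual syntax or logic):

    # A method is enabled only when a temporal guard over the object's
    # event history holds; the business code itself stays guard-free.
    class Buffer:
        def __init__(self):
            self.history = []

        def _since_last(self, event, other):
            """True iff 'other' occurred after the most recent 'event'."""
            last = max((i for i, e in enumerate(self.history)
                        if e == event), default=-1)
            return other in self.history[last + 1:]

        def put(self):
            self.history.append("put")

        def get(self):
            if not self._since_last("get", "put"):  # sync constraint
                raise RuntimeError("get blocked: no put since last get")
            self.history.append("get")

    b = Buffer()
    b.put(); b.get()     # allowed
    # b.get()            # would raise: constraint violated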
@techreport{BRICS-DS-03-4,
author = "Crazzolara, Federico",
title = "Language, Semantics, and Methods for Security
Protocols",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-4",
address = daimi,
month = may,
note = "PhD thesis. xii+160",
comment = "Defence: May~15, 2003",
abstract = "Security protocols help in establishing
secure channels between communicating systems.
Great care therefore needs to be taken in
developing and implementing robust protocols.
The complexity of security-protocol
interactions can hide, however, security
weaknesses that only a formal analysis can
reveal. The last few years have seen the
emergence of successful intensional,
event-based, formal approaches to reasoning
about security protocols. The methods are
concerned with reasoning about the events that
a security protocol can perform, and make use
of a causal dependency that exists between
events. Methods like strand spaces and the
inductive method of Paulson have been designed
to support an intensional, event-based, style
of reasoning. These methods have successfully
tackled a number of protocols though in an {\em
ad hoc} fashion. They make an informal leap
from a protocol to its representation and do
not address how to build up protocol
representations in a compositional fashion.
\bibpar
This dissertation presents a new, event-based
approach to reasoning about security protocols.
We seek a broader class of models to show how
event-based models can be structured in a
compositional way and so used to give a formal
semantics to security protocols which supports
proofs of their correctness. More precisely, we
give a compositional event-based semantics to
an economical, but expressive, language for
describing security protocols (SPL); so the
events and dependency of a wide range of
protocols are determined once and for all. The
net semantics allows the derivation of general
properties and proof principles, the use of
which is demonstrated in establishing security
properties for a number of protocols. The NSL
public-key protocol, the ISO 5-pass
authentication and the $\Pi_3$ key-translation
protocols are analysed for their level of
secrecy and authentication and for their
robustness in runs where some session-keys are
compromised. Particularly useful in the
analysis are general results that show which
events of a protocol run cannot be those of a
spy. \bibpar
The net semantics of SPL is formally related to
a transition semantics which can easily be
implemented. (An existing SPL implementation
bridges the gap that often exists between
abstract protocol models on which verification
of security properties is carried out and the
implemented protocol.) SPL-nets are a
particular kind of contextual Petri-nets. They
have persistent conditions and, as we show in
this thesis, unfold under reasonable
assumptions to a more basic kind of nets. We
relate SPL-nets to strand spaces and inductive
rules, as well as trace languages and event
structures, thereby unifying a range of approaches,
as well as providing conditions under which
particular, more limited, models are adequate
for the analysis of protocols. The relations
that are described in this thesis help
integrate process calculi and algebraic
reasoning with event-based methods to obtain a
more accurate analysis of security properties
of protocols.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
@techreport{BRICS-DS-03-3,
author = "Srba, Ji{\v{r}}{\'\i}",
title = "Decidability and Complexity Issues for
Infinite-State Processes",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-3",
address = daimi,
note = "PhD thesis. xii+171~pp",
comment = "Defence: April~14, 2003",
abstract = "This thesis studies decidability and
complexity border-lines for algorithmic
verification of infinite-state systems.
Verification techniques for finite-state
processes are a successful approach for the
validation of reactive programs operating over
finite domains. When infinite data domains are
introduced, most of the verification problems
become in general undecidable. By imposing
certain restrictions on the considered models
(while still including infinite-state spaces),
it is possible to provide algorithmic decision
procedures for a variety of properties. Hence
the models of infinite-state systems can be
classified according to which verification
problems are decidable, and in the positive
case according to complexity
considerations.\bibpar
This thesis aims to contribute to the problems
mentioned above by studying decidability and
complexity questions of equivalence checking
problems, i.e., the problems whether two
infinite-state processes are equivalent with
regard to some suitable notion of behavioural
equivalence. In particular, our interest is
focused on strong and weak bisimilarity
checking within classical models of
infinite-state processes. \bibpar
Throughout the thesis, processes are modelled
by the formalism of process rewrite systems
which provides a uniform classification of all
the considered classes of infinite-state
processes.\bibpar
We begin our exposition of infinite-state
systems by having a closer look at the very
basic model of labelled transition systems. We
demonstrate a simple geometrical encoding of
the labelled systems into unlabelled ones by
transforming labels into linear paths of
different lengths. The encoding preserves the
answers to the strong bisimilarity checking and
is shown to be effective e.g. for the classes
of pushdown processes and Petri nets. This
gives several decidability and complexity
corollaries about unlabelled variants of the
models.\bibpar
We continue by developing a general
decidability theorem for commutative process
algebras like deadlock-sensitive BPP (basic
parallel processes), lossy BPP, interrupt BPP
and timed-arc BPP nets. The introduced
technique relies on the tableau-based proof of
decidability of strong bisimilarity for BPP and
we extend it in several ways to provide a wider
applicability range of the technique. This, in
particular, implies that all the mentioned
classes of commutative process algebras allow
for algorithmic checking of strong
bisimilarity. \bibpar
Another topic studied in the thesis deals with
finding tight complexity estimates for strong
and weak bisimilarity checking. A novel
technique called existential quantification is
explicitly described and used to show that the
strong bisimilarity and regularity problems for
basic process algebra and basic parallel
processes are PSPACE-hard. In case of weak
bisimilarity checking of basic parallel
processes the problems are shown to be again
PSPACE-hard --- this time, however, also for
the normed subclass. \bibpar
Finally we consider the problems of weak
bisimilarity checking for the classes of
pushdown processes and PA-processes. Negative
answers to the problems are given --- both
problems are proven to be undecidable. The
proof techniques moreover solve some other open
problems. For example, the undecidability proof
for pushdown processes implies also
undecidability of strong bisimilarity for
prefix-recognizable graphs. \bibpar
The thesis is concluded with an overview of the
state-of-the-art for strong and weak
bisimilarity problems within the classes from
the hierarchy of process rewrite systems. ",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}
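The label-to-path encoding mentioned in the abstract has roughly this flavour (a schematic Python reconstruction of the stated idea; the actual construction in the thesis, which must preserve strong bisimilarity, is more careful):

    from itertools import count

    def unlabel(transitions, labels):
        """Replace each labelled edge (src, label, dst) by an unlabelled
        chain of fresh states whose length depends only on the label, so
        labels are encoded geometrically as path lengths."""
        length = {lab: i + 2 for i, lab in enumerate(sorted(labels))}
        fresh = count()
        edges = []
        for src, lab, dst in transitions:
            chain = ([src]
                     + [("fresh", next(fresh))
                        for _ in range(length[lab] - 1)]
                     + [dst])
            edges.extend(zip(chain, chain[1:]))
        return edges

    # 'a' becomes a path of length 2, 'b' a path of length 3:
    print(unlabel([("p", "a", "q"), ("p", "b", "q")], {"a", "b"}))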
@techreport{BRICS-DS-03-2,
author = "Valencia, Frank D.",
title = "Temporal Concurrent Constraint Programming",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-2",
address = daimi,
month = feb,
note = "PhD thesis. xvii+174",
comment = "Defence: February~5, 2003",
abstract = "{\em Concurrent constraint programming} (ccp)
is a formalism for concurrency in which agents
interact with one another by telling (adding)
and asking (reading) information in a shared
medium. Temporal ccp extends ccp by allowing
agents to be constrained by time conditions.
This dissertation studies temporal ccp as a
model of concurrency for discrete-timed
systems. The study is conducted by developing a
process calculus called {\tt ntcc}. \bibpar
The {\tt ntcc} calculus generalizes the tcc
model, the latter being a temporal ccp model
for {\em deterministic} and {\em synchronous
timed reactive} systems. The calculus is built
upon a few basic ideas but it captures several
aspects of timed systems. As tcc, {\tt ntcc}
can model unit delays, time-outs, pre-emption
and synchrony. Additionally, it can model {\em
unbounded but finite delays, bounded
eventuality, asynchrony} and {\em
nondeterminism}. The applicability of the
calculus is illustrated with several
interesting examples of discrete-time systems
involving mutable data structures, robotic
devices, multi-agent systems and music
applications. \bibpar
The calculus is provided with a {\em
denotational semantics} that captures the
reactive computations of processes in the
presence of arbitrary environments. The
denotation is proven to be {\em fully-abstract}
for a substantial fragment of the calculus.
This dissertation identifies the exact
technical problems (arising mainly from
allowing nondeterminism, locality and time-outs
in the calculus) that make it impossible to
obtain a fully abstract result for the full
language of {\tt ntcc}. \bibpar
Also, the calculus is provided with a process
logic for expressing {\em temporal
specifications} of {\tt ntcc} processes. This
dissertation introduces a ({\em relatively})
{\em complete inference system} to prove that a
given {\tt ntcc} process satisfies a given
formula in this logic. The denotation, process
logic and inference system presented in this
dissertation significantly extend and
strengthen similar developments for tcc and
(untimed) ccp. \bibpar
This dissertation addresses the {\em
decidability} of various behavioral
equivalences for the calculus and {\em
characterizes} their corresponding induced
congruences. The equivalences (and their
associated congruences) are proven to be
decidable for a significant fragment of the
calculus. The decidability results involve a
systematic translation of processes into finite
state B{\"u}chi automata. To the author's best
knowledge the study of decidability for ccp
equivalences is original to this work. \bibpar
Furthermore, this dissertation deepens the
understanding of previous ccp work by
establishing an {\em expressive power
hierarchy} of several temporal ccp languages
which were proposed in the literature by other
authors. These languages, represented in this
dissertation as {\em variants} of {\tt ntcc},
differ in their way of defining infinite
behavior (i.e., {\em replication} or {\em
recursion}) and the scope of variables (i.e.,
{\em static} or {\em dynamic scope}). In
particular, it is shown that (1) recursive
procedures with parameters can be encoded into
parameterless recursive procedures with dynamic
scoping, and vice-versa; (2) replication can be
encoded into parameterless recursive procedures
with static scoping, and vice-versa; (3) the
languages from (1) are {\em strictly more
expressive} than the languages from (2). (Thus,
in this family of languages recursion is more
expressive than replication and dynamic scope
is more expressive than static scope.)
Moreover, it is shown that behavioral
equivalence is {\em undecidable} for the
languages from (1), but {\em decidable} for the
languages from (2). Interestingly, the
undecidability result holds even if the process
variables take values from a {\em fixed finite
domain} whilst the decidability holds for {\em
arbitrary domains}. \bibpar
Both the expressive power hierarchy and
decidability/undecidability results give a
clear distinction among the various temporal
ccp languages. Also, the distinction between
static and dynamic scoping helps to clarify the
situation in the (untimed) ccp family of
languages. Moreover, the methods used in the
decidability results may provide a framework to
perform further systematic investigations of
temporal ccp languages."
}
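The tell/ask interaction underlying ccp can be sketched minimally (a generic Python illustration of the paradigm's shared monotonic store, not of the ntcc calculus or its timed constructs):

    # The shared medium is a monotonically growing store of constraints.
    # Agents tell (add) information; ask succeeds only if the queried
    # constraint is entailed -- here, crudely, if it was told verbatim.
    store = set()

    def tell(constraint):
        store.add(constraint)          # information only accumulates

    def ask(constraint):
        return constraint in store     # a real system checks entailment

    tell("x > 0")
    print(ask("x > 0"), ask("x > 1"))  # True False (no entailment here)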
@techreport{BRICS-DS-03-1,
author = "Brabrand, Claus",
title = "Domain Specific Languages for Interactive Web
Services",
institution = brics,
year = 2003,
type = ds,
number = "DS-03-1",
address = daimi,
month = jan,
note = "PhD thesis. xiv+214~pp",
abstract = "This dissertation shows how domain specific
languages may be applied to the domain of
interactive Web services to obtain flexible,
safe, and efficient solutions.\bibpar
We show how each of four key aspects of
interactive Web services involving sessions,
dynamic creation of HTML/XML documents, form
field input validation, and concurrency
control, may benefit from the design of a
dedicated language.\bibpar
Also, we show how a notion of metamorphic
syntax macros facilitates integration of these
individual domain specific languages into a
complete language. \bibpar
The result is a domain specific language,
\verb!<bigwig>!, that supports virtually all
aspects of the development of interactive Web
services and provides flexible, safe, and
efficient solutions.",
linkhtmlabs = "",
linkps = "",
linkpdf = ""
}